Fix build am: 6f85d83163 am: cea6041539 am: ac11d74c81 am: 94742171f1

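This merge moves the camera client library onto generated AIDL
interfaces: ICameraService, ICameraServiceListener, ICameraDeviceUser,
and ICameraDeviceCallbacks are now built from .aidl sources, and the
hand-written ICameraService.cpp / ICameraServiceListener.cpp are
removed. Service calls now return binder::Status instead of status_t;
a minimal sketch of the new calling convention used throughout this
patch:

    sp<hardware::ICamera> camera;
    binder::Status ret = cs->connect(cl, cameraId, clientPackageName,
            clientUid, clientPid, /*out*/&camera);
    if (!ret.isOk()) {
        // map ret.serviceSpecificErrorCode() (e.g. ERROR_CAMERA_IN_USE)
        // back to a status_t such as -EBUSY
    }
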
Change-Id: Ifa1e2ab0d259b2919a4f250d84209987aa0eb71a
diff --git a/camera/Android.mk b/camera/Android.mk
index 471cb0d..1a3382f 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -18,7 +18,24 @@
 
 LOCAL_PATH := $(CAMERA_CLIENT_LOCAL_PATH)
 
-LOCAL_SRC_FILES:= \
+LOCAL_AIDL_INCLUDES := \
+    frameworks/av/camera/aidl \
+    frameworks/base/core/java \
+    frameworks/native/aidl/gui
+
+# AIDL files for camera interfaces
+# The headers for these interfaces will be available to any modules that
+# include libcamera_client, at the path "aidl/package/path/BnFoo.h"
+
+LOCAL_SRC_FILES := \
+    aidl/android/hardware/ICameraService.aidl \
+    aidl/android/hardware/ICameraServiceListener.aidl \
+    aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl \
+    aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+
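+# Illustrative example: a module that links libcamera_client can then
+#   #include <android/hardware/ICameraService.h>
+# (as Camera.cpp does in this change) to use the generated classes.
+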
+# Source for camera interface parcelables, and manually-written interfaces
+
+LOCAL_SRC_FILES += \
 	Camera.cpp \
 	CameraMetadata.cpp \
 	CameraParameters.cpp \
@@ -26,15 +43,12 @@
 	CameraParameters2.cpp \
 	ICamera.cpp \
 	ICameraClient.cpp \
-	ICameraService.cpp \
-	ICameraServiceListener.cpp \
 	ICameraServiceProxy.cpp \
 	ICameraRecordingProxy.cpp \
 	ICameraRecordingProxyListener.cpp \
-	camera2/ICameraDeviceUser.cpp \
-	camera2/ICameraDeviceCallbacks.cpp \
 	camera2/CaptureRequest.cpp \
 	camera2/OutputConfiguration.cpp \
+	camera2/SubmitInfo.cpp \
 	CameraBase.cpp \
 	CameraUtils.cpp \
 	VendorTagDescriptor.cpp
@@ -52,6 +66,14 @@
 LOCAL_C_INCLUDES += \
 	system/media/camera/include \
 	system/media/private/camera/include \
+	frameworks/native/include/media/openmax \
+	frameworks/av/include/camera
+
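+# Export these include paths to every module that links against
+# libcamera_client, so dependents pick up the camera headers automatically.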
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+	system/media/camera/include \
+	frameworks/av/include/camera
+
+LOCAL_CFLAGS += -Werror -Wall -Wextra
 
 LOCAL_MODULE:= libcamera_client
 
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index 9bf3134..bf9904c 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -24,10 +24,10 @@
 #include <binder/IServiceManager.h>
 #include <binder/IMemory.h>
 
-#include <camera/Camera.h>
-#include <camera/ICameraRecordingProxyListener.h>
-#include <camera/ICameraService.h>
-#include <camera/ICamera.h>
+#include <Camera.h>
+#include <ICameraRecordingProxyListener.h>
+#include <android/hardware/ICameraService.h>
+#include <android/hardware/ICamera.h>
 
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
@@ -40,10 +40,10 @@
 }
 
 CameraTraits<Camera>::TCamConnectService CameraTraits<Camera>::fnConnectService =
-        &ICameraService::connect;
+        &::android::hardware::ICameraService::connect;
 
 // construct a camera client from an existing camera remote
-sp<Camera> Camera::create(const sp<ICamera>& camera)
+sp<Camera> Camera::create(const sp<::android::hardware::ICamera>& camera)
 {
      ALOGV("create");
      if (camera == 0) {
@@ -72,9 +72,9 @@
 }
 
 sp<Camera> Camera::connect(int cameraId, const String16& clientPackageName,
-        int clientUid)
+        int clientUid, int clientPid)
 {
-    return CameraBaseT::connect(cameraId, clientPackageName, clientUid);
+    return CameraBaseT::connect(cameraId, clientPackageName, clientUid, clientPid);
 }
 
 status_t Camera::connectLegacy(int cameraId, int halVersion,
@@ -84,21 +84,51 @@
 {
     ALOGV("%s: connect legacy camera device", __FUNCTION__);
     sp<Camera> c = new Camera(cameraId);
-    sp<ICameraClient> cl = c;
+    sp<::android::hardware::ICameraClient> cl = c;
     status_t status = NO_ERROR;
-    const sp<ICameraService>& cs = CameraBaseT::getCameraService();
+    const sp<::android::hardware::ICameraService>& cs = CameraBaseT::getCameraService();
 
-    if (cs != 0) {
-        status = cs.get()->connectLegacy(cl, cameraId, halVersion, clientPackageName,
-                                        clientUid, /*out*/c->mCamera);
+    binder::Status ret;
+    if (cs != nullptr) {
+        ret = cs.get()->connectLegacy(cl, cameraId, halVersion, clientPackageName,
+                clientUid, /*out*/&(c->mCamera));
     }
-    if (status == OK && c->mCamera != 0) {
+    if (ret.isOk() && c->mCamera != nullptr) {
         IInterface::asBinder(c->mCamera)->linkToDeath(c);
         c->mStatus = NO_ERROR;
         camera = c;
     } else {
-        ALOGW("An error occurred while connecting to camera %d: %d (%s)",
-                cameraId, status, strerror(-status));
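+        // Map the service-specific error carried in the binder Status back to
+        // a conventional status_t / negative-errno value for existing callers.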
+        switch(ret.serviceSpecificErrorCode()) {
+            case hardware::ICameraService::ERROR_DISCONNECTED:
+                status = -ENODEV;
+                break;
+            case hardware::ICameraService::ERROR_CAMERA_IN_USE:
+                status = -EBUSY;
+                break;
+            case hardware::ICameraService::ERROR_INVALID_OPERATION:
+                status = -EINVAL;
+                break;
+            case hardware::ICameraService::ERROR_MAX_CAMERAS_IN_USE:
+                status = -EUSERS;
+                break;
+            case hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT:
+                status = BAD_VALUE;
+                break;
+            case hardware::ICameraService::ERROR_DEPRECATED_HAL:
+                status = -EOPNOTSUPP;
+                break;
+            case hardware::ICameraService::ERROR_DISABLED:
+                status = -EACCES;
+                break;
+            case hardware::ICameraService::ERROR_PERMISSION_DENIED:
+                status = PERMISSION_DENIED;
+                break;
+            default:
+                status = -EINVAL;
+                ALOGW("An error occurred while connecting to camera %d: %s", cameraId,
+                        (cs == nullptr) ? "Service not available" : ret.toString8().string());
+                break;
+        }
         c.clear();
     }
     return status;
@@ -107,21 +137,21 @@
 status_t Camera::reconnect()
 {
     ALOGV("reconnect");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->connect(this);
 }
 
 status_t Camera::lock()
 {
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->lock();
 }
 
 status_t Camera::unlock()
 {
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->unlock();
 }
@@ -130,35 +160,43 @@
 status_t Camera::setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer)
 {
     ALOGV("setPreviewTarget(%p)", bufferProducer.get());
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     ALOGD_IF(bufferProducer == 0, "app passed NULL surface");
     return c->setPreviewTarget(bufferProducer);
 }
 
+status_t Camera::setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer)
+{
+    ALOGV("setVideoTarget(%p)", bufferProducer.get());
+    sp <::android::hardware::ICamera> c = mCamera;
+    if (c == 0) return NO_INIT;
+    ALOGD_IF(bufferProducer == 0, "app passed NULL video surface");
+    return c->setVideoTarget(bufferProducer);
+}
+
 // start preview mode
 status_t Camera::startPreview()
 {
     ALOGV("startPreview");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->startPreview();
 }
 
-status_t Camera::storeMetaDataInBuffers(bool enabled)
+status_t Camera::setVideoBufferMode(int32_t videoBufferMode)
 {
-    ALOGV("storeMetaDataInBuffers: %s",
-            enabled? "true": "false");
-    sp <ICamera> c = mCamera;
+    ALOGV("setVideoBufferMode: %d", videoBufferMode);
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
-    return c->storeMetaDataInBuffers(enabled);
+    return c->setVideoBufferMode(videoBufferMode);
 }
 
 // start recording mode, must call setPreviewTarget first
 status_t Camera::startRecording()
 {
     ALOGV("startRecording");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->startRecording();
 }
@@ -167,7 +205,7 @@
 void Camera::stopPreview()
 {
     ALOGV("stopPreview");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return;
     c->stopPreview();
 }
@@ -180,7 +218,7 @@
         Mutex::Autolock _l(mLock);
         mRecordingProxyListener.clear();
     }
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return;
     c->stopRecording();
 }
@@ -189,16 +227,24 @@
 void Camera::releaseRecordingFrame(const sp<IMemory>& mem)
 {
     ALOGV("releaseRecordingFrame");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return;
     c->releaseRecordingFrame(mem);
 }
 
+void Camera::releaseRecordingFrameHandle(native_handle_t* handle)
+{
+    ALOGV("releaseRecordingFrameHandle");
+    sp <::android::hardware::ICamera> c = mCamera;
+    if (c == 0) return;
+    c->releaseRecordingFrameHandle(handle);
+}
+
 // get preview state
 bool Camera::previewEnabled()
 {
     ALOGV("previewEnabled");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return false;
     return c->previewEnabled();
 }
@@ -207,7 +253,7 @@
 bool Camera::recordingEnabled()
 {
     ALOGV("recordingEnabled");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return false;
     return c->recordingEnabled();
 }
@@ -215,7 +261,7 @@
 status_t Camera::autoFocus()
 {
     ALOGV("autoFocus");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->autoFocus();
 }
@@ -223,7 +269,7 @@
 status_t Camera::cancelAutoFocus()
 {
     ALOGV("cancelAutoFocus");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->cancelAutoFocus();
 }
@@ -232,7 +278,7 @@
 status_t Camera::takePicture(int msgType)
 {
     ALOGV("takePicture: 0x%x", msgType);
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->takePicture(msgType);
 }
@@ -241,7 +287,7 @@
 status_t Camera::setParameters(const String8& params)
 {
     ALOGV("setParameters");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->setParameters(params);
 }
@@ -251,7 +297,7 @@
 {
     ALOGV("getParameters");
     String8 params;
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c != 0) params = mCamera->getParameters();
     return params;
 }
@@ -260,7 +306,7 @@
 status_t Camera::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
 {
     ALOGV("sendCommand");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->sendCommand(cmd, arg1, arg2);
 }
@@ -280,7 +326,7 @@
 void Camera::setPreviewCallbackFlags(int flag)
 {
     ALOGV("setPreviewCallbackFlags");
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return;
     mCamera->setPreviewCallbackFlag(flag);
 }
@@ -288,7 +334,7 @@
 status_t Camera::setPreviewCallbackTarget(
         const sp<IGraphicBufferProducer>& callbackProducer)
 {
-    sp <ICamera> c = mCamera;
+    sp <::android::hardware::ICamera> c = mCamera;
     if (c == 0) return NO_INIT;
     return c->setPreviewCallbackTarget(callbackProducer);
 }
@@ -343,6 +389,35 @@
     }
 }
 
+void Camera::recordingFrameHandleCallbackTimestamp(nsecs_t timestamp, native_handle_t* handle)
+{
+    // If recording proxy listener is registered, forward the frame and return.
+    // The other listener (mListener) is ignored because the receiver needs to
+    // call releaseRecordingFrameHandle.
+    sp<ICameraRecordingProxyListener> proxylistener;
+    {
+        Mutex::Autolock _l(mLock);
+        proxylistener = mRecordingProxyListener;
+    }
+    if (proxylistener != NULL) {
+        proxylistener->recordingFrameHandleCallbackTimestamp(timestamp, handle);
+        return;
+    }
+
+    sp<CameraListener> listener;
+    {
+        Mutex::Autolock _l(mLock);
+        listener = mListener;
+    }
+
+    if (listener != NULL) {
+        listener->postRecordingFrameHandleTimestamp(timestamp, handle);
+    } else {
+        ALOGW("No listener was set. Drop a recording frame.");
+        releaseRecordingFrameHandle(handle);
+    }
+}
+
 sp<ICameraRecordingProxy> Camera::getRecordingProxy() {
     ALOGV("getProxy");
     return new RecordingProxy(this);
@@ -368,6 +443,11 @@
     mCamera->releaseRecordingFrame(mem);
 }
 
+void Camera::RecordingProxy::releaseRecordingFrameHandle(native_handle_t* handle) {
+    ALOGV("RecordingProxy::releaseRecordingFrameHandle");
+    mCamera->releaseRecordingFrameHandle(handle);
+}
+
 Camera::RecordingProxy::RecordingProxy(const sp<Camera>& camera)
 {
     mCamera = camera;
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 5d50aa8..15d7715 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -21,12 +21,13 @@
 #include <utils/threads.h>
 #include <utils/Mutex.h>
 
+#include <android/hardware/ICameraService.h>
+
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/IMemory.h>
 
 #include <camera/CameraBase.h>
-#include <camera/ICameraService.h>
 
 // needed to instantiate
 #include <camera/Camera.h>
@@ -35,8 +36,28 @@
 
 namespace android {
 
+namespace hardware {
+
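+// CameraInfo is one of the camera interface parcelables noted in Android.mk;
+// readFromParcel must consume fields in exactly the order writeToParcel
+// emits them (facing, then orientation).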
+status_t CameraInfo::writeToParcel(Parcel* parcel) const {
+    status_t res;
+    res = parcel->writeInt32(facing);
+    if (res != OK) return res;
+    res = parcel->writeInt32(orientation);
+    return res;
+}
+
+status_t CameraInfo::readFromParcel(const Parcel* parcel) {
+    status_t res;
+    res = parcel->readInt32(&facing);
+    if (res != OK) return res;
+    res = parcel->readInt32(&orientation);
+    return res;
+}
+
+}
+
 namespace {
-    sp<ICameraService>        gCameraService;
+    sp<::android::hardware::ICameraService> gCameraService;
     const int                 kCameraServicePollDelay = 500000; // 0.5s
     const char*               kCameraServiceName      = "media.camera";
 
@@ -65,7 +86,7 @@
 
 // establish binder interface to camera service
 template <typename TCam, typename TCamTraits>
-const sp<ICameraService>& CameraBase<TCam, TCamTraits>::getCameraService()
+const sp<::android::hardware::ICameraService>& CameraBase<TCam, TCamTraits>::getCameraService()
 {
     Mutex::Autolock _l(gLock);
     if (gCameraService.get() == 0) {
@@ -83,7 +104,7 @@
             gDeathNotifier = new DeathNotifier();
         }
         binder->linkToDeath(gDeathNotifier);
-        gCameraService = interface_cast<ICameraService>(binder);
+        gCameraService = interface_cast<::android::hardware::ICameraService>(binder);
     }
     ALOGE_IF(gCameraService == 0, "no CameraService!?");
     return gCameraService;
@@ -92,24 +113,25 @@
 template <typename TCam, typename TCamTraits>
 sp<TCam> CameraBase<TCam, TCamTraits>::connect(int cameraId,
                                                const String16& clientPackageName,
-                                               int clientUid)
+                                               int clientUid, int clientPid)
 {
     ALOGV("%s: connect", __FUNCTION__);
     sp<TCam> c = new TCam(cameraId);
     sp<TCamCallbacks> cl = c;
-    status_t status = NO_ERROR;
-    const sp<ICameraService>& cs = getCameraService();
+    const sp<::android::hardware::ICameraService>& cs = getCameraService();
 
-    if (cs != 0) {
+    binder::Status ret;
+    if (cs != nullptr) {
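+        // TCamTraits::fnConnectService is a pointer-to-member (e.g.
+        // &ICameraService::connect for Camera), letting each client type
+        // supply its own connect entry point through this shared template.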
         TCamConnectService fnConnectService = TCamTraits::fnConnectService;
-        status = (cs.get()->*fnConnectService)(cl, cameraId, clientPackageName, clientUid,
-                                             /*out*/ c->mCamera);
+        ret = (cs.get()->*fnConnectService)(cl, cameraId, clientPackageName, clientUid,
+                                               clientPid, /*out*/ &c->mCamera);
     }
-    if (status == OK && c->mCamera != 0) {
+    if (ret.isOk() && c->mCamera != nullptr) {
         IInterface::asBinder(c->mCamera)->linkToDeath(c);
         c->mStatus = NO_ERROR;
     } else {
-        ALOGW("An error occurred while connecting to camera: %d", cameraId);
+        ALOGW("An error occurred while connecting to camera %d: %s", cameraId,
+                (cs == nullptr) ? "Service not available" : ret.toString8().string());
         c.clear();
     }
     return c;
@@ -182,38 +204,50 @@
 
 template <typename TCam, typename TCamTraits>
 int CameraBase<TCam, TCamTraits>::getNumberOfCameras() {
-    const sp<ICameraService> cs = getCameraService();
+    const sp<::android::hardware::ICameraService> cs = getCameraService();
 
     if (!cs.get()) {
         // as required by the public Java APIs
         return 0;
     }
-    return cs->getNumberOfCameras();
+    int32_t count;
+    binder::Status res = cs->getNumberOfCameras(
+            ::android::hardware::ICameraService::CAMERA_TYPE_BACKWARD_COMPATIBLE,
+            &count);
+    if (!res.isOk()) {
+        ALOGE("Error reading number of cameras: %s",
+                res.toString8().string());
+        count = 0;
+    }
+    return count;
 }
 
 // this can be in BaseCamera but it should be an instance method
 template <typename TCam, typename TCamTraits>
 status_t CameraBase<TCam, TCamTraits>::getCameraInfo(int cameraId,
-                               struct CameraInfo* cameraInfo) {
-    const sp<ICameraService>& cs = getCameraService();
+        struct hardware::CameraInfo* cameraInfo) {
+    const sp<::android::hardware::ICameraService>& cs = getCameraService();
     if (cs == 0) return UNKNOWN_ERROR;
-    return cs->getCameraInfo(cameraId, cameraInfo);
+    binder::Status res = cs->getCameraInfo(cameraId, cameraInfo);
+    return res.isOk() ? OK : res.serviceSpecificErrorCode();
 }
 
 template <typename TCam, typename TCamTraits>
 status_t CameraBase<TCam, TCamTraits>::addServiceListener(
-                            const sp<ICameraServiceListener>& listener) {
-    const sp<ICameraService>& cs = getCameraService();
+        const sp<::android::hardware::ICameraServiceListener>& listener) {
+    const sp<::android::hardware::ICameraService>& cs = getCameraService();
     if (cs == 0) return UNKNOWN_ERROR;
-    return cs->addListener(listener);
+    binder::Status res = cs->addListener(listener);
+    return res.isOk() ? OK : res.serviceSpecificErrorCode();
 }
 
 template <typename TCam, typename TCamTraits>
 status_t CameraBase<TCam, TCamTraits>::removeServiceListener(
-                            const sp<ICameraServiceListener>& listener) {
-    const sp<ICameraService>& cs = getCameraService();
+        const sp<::android::hardware::ICameraServiceListener>& listener) {
+    const sp<::android::hardware::ICameraService>& cs = getCameraService();
     if (cs == 0) return UNKNOWN_ERROR;
-    return cs->removeListener(listener);
+    binder::Status res = cs->removeListener(listener);
+    return res.isOk() ? OK : res.serviceSpecificErrorCode();
 }
 
 template class CameraBase<Camera>;
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 46bcc1d..c78fc5d 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -79,7 +79,7 @@
     return mBuffer;
 }
 
-status_t CameraMetadata::unlock(const camera_metadata_t *buffer) {
+status_t CameraMetadata::unlock(const camera_metadata_t *buffer) const {
     if (!mLocked) {
         ALOGE("%s: Can't unlock a non-locked CameraMetadata!", __FUNCTION__);
         return INVALID_OPERATION;
@@ -621,7 +621,7 @@
     return res;
 }
 
-status_t CameraMetadata::readFromParcel(Parcel *parcel) {
+status_t CameraMetadata::readFromParcel(const Parcel *parcel) {
 
     ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
 
diff --git a/camera/CameraParameters2.cpp b/camera/CameraParameters2.cpp
index 378afeb..c29233c 100644
--- a/camera/CameraParameters2.cpp
+++ b/camera/CameraParameters2.cpp
@@ -351,7 +351,7 @@
 
 void CameraParameters2::dump() const
 {
-    ALOGD("dump: mMap.size = %d", mMap.size());
+    ALOGD("dump: mMap.size = %zu", mMap.size());
     for (size_t i = 0; i < mMap.size(); i++) {
         String8 k, v;
         k = mMap.keyAt(i);
diff --git a/camera/CameraUtils.cpp b/camera/CameraUtils.cpp
index 04244ac..1676be1 100644
--- a/camera/CameraUtils.cpp
+++ b/camera/CameraUtils.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 
 #include <camera/CameraUtils.h>
+#include <media/hardware/HardwareAPI.h>
 
 #include <system/window.h>
 #include <system/graphics.h>
@@ -121,5 +122,4 @@
     return OK;
 }
 
-
 } /* namespace android */
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index 4e36160..0a447e7 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -26,7 +26,7 @@
     return requestId >= 0;
 }
 
-status_t CaptureResultExtras::readFromParcel(Parcel *parcel) {
+status_t CaptureResultExtras::readFromParcel(const Parcel *parcel) {
     if (parcel == NULL) {
         ALOGE("%s: Null parcel", __FUNCTION__);
         return BAD_VALUE;
@@ -38,6 +38,7 @@
     parcel->readInt32(&precaptureTriggerId);
     parcel->readInt64(&frameNumber);
     parcel->readInt32(&partialResultCount);
+    parcel->readInt32(&errorStreamId);
 
     return OK;
 }
@@ -54,6 +55,7 @@
     parcel->writeInt32(precaptureTriggerId);
     parcel->writeInt64(frameNumber);
     parcel->writeInt32(partialResultCount);
+    parcel->writeInt32(errorStreamId);
 
     return OK;
 }
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index 9943be6..0680d7c 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -21,11 +21,15 @@
 #include <stdint.h>
 #include <sys/types.h>
 #include <binder/Parcel.h>
-#include <camera/ICamera.h>
+#include <camera/CameraUtils.h>
+#include <android/hardware/ICamera.h>
+#include <android/hardware/ICameraClient.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
+#include <media/hardware/HardwareAPI.h>
 
 namespace android {
+namespace hardware {
 
 enum {
     DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
@@ -48,7 +52,9 @@
     STOP_RECORDING,
     RECORDING_ENABLED,
     RELEASE_RECORDING_FRAME,
-    STORE_META_DATA_IN_BUFFERS,
+    SET_VIDEO_BUFFER_MODE,
+    SET_VIDEO_BUFFER_TARGET,
+    RELEASE_RECORDING_FRAME_HANDLE,
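+    // SET_VIDEO_BUFFER_MODE occupies the slot formerly used by
+    // STORE_META_DATA_IN_BUFFERS, and the new transactions are appended,
+    // presumably so existing transaction code values stay stable.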
 };
 
 class BpCamera: public BpInterface<ICamera>
@@ -60,13 +66,14 @@
     }
 
     // disconnect from camera service
-    void disconnect()
+    binder::Status disconnect()
     {
         ALOGV("disconnect");
         Parcel data, reply;
         data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
         remote()->transact(DISCONNECT, data, &reply);
         reply.readExceptionCode();
+        return binder::Status::ok();
     }
 
     // pass the buffered IGraphicBufferProducer to the camera service
@@ -148,16 +155,30 @@
         Parcel data, reply;
         data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
         data.writeStrongBinder(IInterface::asBinder(mem));
+
         remote()->transact(RELEASE_RECORDING_FRAME, data, &reply);
     }
 
-    status_t storeMetaDataInBuffers(bool enabled)
-    {
-        ALOGV("storeMetaDataInBuffers: %s", enabled? "true": "false");
+    void releaseRecordingFrameHandle(native_handle_t *handle) {
+        ALOGV("releaseRecordingFrameHandle");
         Parcel data, reply;
         data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
-        data.writeInt32(enabled);
-        remote()->transact(STORE_META_DATA_IN_BUFFERS, data, &reply);
+        data.writeNativeHandle(handle);
+
+        remote()->transact(RELEASE_RECORDING_FRAME_HANDLE, data, &reply);
+
+        // Close the native handle because camera received a dup copy.
+        native_handle_close(handle);
+        native_handle_delete(handle);
+    }
+
+    status_t setVideoBufferMode(int32_t videoBufferMode)
+    {
+        ALOGV("setVideoBufferMode: %d", videoBufferMode);
+        Parcel data, reply;
+        data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+        data.writeInt32(videoBufferMode);
+        remote()->transact(SET_VIDEO_BUFFER_MODE, data, &reply);
         return reply.readInt32();
     }
 
@@ -268,6 +289,17 @@
         remote()->transact(UNLOCK, data, &reply);
         return reply.readInt32();
     }
+
+    status_t setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer)
+    {
+        ALOGV("setVideoTarget");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+        sp<IBinder> b(IInterface::asBinder(bufferProducer));
+        data.writeStrongBinder(b);
+        remote()->transact(SET_VIDEO_BUFFER_TARGET, data, &reply);
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(Camera, "android.hardware.ICamera");
@@ -339,11 +371,18 @@
             releaseRecordingFrame(mem);
             return NO_ERROR;
         } break;
-        case STORE_META_DATA_IN_BUFFERS: {
-            ALOGV("STORE_META_DATA_IN_BUFFERS");
+        case RELEASE_RECORDING_FRAME_HANDLE: {
+            ALOGV("RELEASE_RECORDING_FRAME_HANDLE");
             CHECK_INTERFACE(ICamera, data, reply);
-            bool enabled = data.readInt32();
-            reply->writeInt32(storeMetaDataInBuffers(enabled));
+            // releaseRecordingFrameHandle will be responsible for closing the native handle.
+            releaseRecordingFrameHandle(data.readNativeHandle());
+            return NO_ERROR;
+        } break;
+        case SET_VIDEO_BUFFER_MODE: {
+            ALOGV("SET_VIDEO_BUFFER_MODE");
+            CHECK_INTERFACE(ICamera, data, reply);
+            int32_t mode = data.readInt32();
+            reply->writeInt32(setVideoBufferMode(mode));
             return NO_ERROR;
         } break;
         case PREVIEW_ENABLED: {
@@ -415,6 +454,14 @@
             reply->writeInt32(unlock());
             return NO_ERROR;
         } break;
+        case SET_VIDEO_BUFFER_TARGET: {
+            ALOGV("SET_VIDEO_BUFFER_TARGET");
+            CHECK_INTERFACE(ICamera, data, reply);
+            sp<IGraphicBufferProducer> st =
+                interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
+            reply->writeInt32(setVideoTarget(st));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
@@ -422,4 +469,5 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace hardware
+} // namespace android
diff --git a/camera/ICameraClient.cpp b/camera/ICameraClient.cpp
index 179a341..68cbfb8 100644
--- a/camera/ICameraClient.cpp
+++ b/camera/ICameraClient.cpp
@@ -2,16 +2,16 @@
 **
 ** Copyright 2008, The Android Open Source Project
 **
-** Licensed under the Apache License, Version 2.0 (the "License"); 
-** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at 
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
 **
-**     http://www.apache.org/licenses/LICENSE-2.0 
+**     http://www.apache.org/licenses/LICENSE-2.0
 **
-** Unless required by applicable law or agreed to in writing, software 
-** distributed under the License is distributed on an "AS IS" BASIS, 
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-** See the License for the specific language governing permissions and 
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
 ** limitations under the License.
 */
 
@@ -20,14 +20,18 @@
 #include <utils/Log.h>
 #include <stdint.h>
 #include <sys/types.h>
-#include <camera/ICameraClient.h>
+#include <camera/CameraUtils.h>
+#include <android/hardware/ICameraClient.h>
+#include <media/hardware/HardwareAPI.h>
 
 namespace android {
+namespace hardware {
 
 enum {
     NOTIFY_CALLBACK = IBinder::FIRST_CALL_TRANSACTION,
     DATA_CALLBACK,
     DATA_CALLBACK_TIMESTAMP,
+    RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP,
 };
 
 class BpCameraClient: public BpInterface<ICameraClient>
@@ -77,6 +81,16 @@
         data.writeStrongBinder(IInterface::asBinder(imageData));
         remote()->transact(DATA_CALLBACK_TIMESTAMP, data, &reply, IBinder::FLAG_ONEWAY);
     }
+
+    void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp, native_handle_t* handle) {
+        ALOGV("recordingFrameHandleCallbackTimestamp");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
+        data.writeInt64(timestamp);
+        data.writeNativeHandle(handle);
+        remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP, data, &reply,
+                IBinder::FLAG_ONEWAY);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(CameraClient, "android.hardware.ICameraClient");
@@ -121,6 +135,25 @@
             dataCallbackTimestamp(timestamp, msgType, imageData);
             return NO_ERROR;
         } break;
+        case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP: {
+            ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP");
+            CHECK_INTERFACE(ICameraClient, data, reply);
+            nsecs_t timestamp;
+            status_t res = data.readInt64(&timestamp);
+            if (res != OK) {
+                ALOGE("%s: Failed to read timestamp: %s (%d)", __FUNCTION__, strerror(-res), res);
+                return BAD_VALUE;
+            }
+            native_handle_t* handle = data.readNativeHandle();
+            if (handle == nullptr) {
+                ALOGE("%s: Received a null native handle", __FUNCTION__);
+                return BAD_VALUE;
+            }
+
+            // The native handle will be freed in BpCamera::releaseRecordingFrameHandle.
+            recordingFrameHandleCallbackTimestamp(timestamp, handle);
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
@@ -128,5 +161,5 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
-
+} // namespace hardware
+} // namespace android
diff --git a/camera/ICameraRecordingProxy.cpp b/camera/ICameraRecordingProxy.cpp
index 517b64f..63c4b1d 100644
--- a/camera/ICameraRecordingProxy.cpp
+++ b/camera/ICameraRecordingProxy.cpp
@@ -16,10 +16,12 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ICameraRecordingProxy"
+#include <camera/CameraUtils.h>
 #include <camera/ICameraRecordingProxy.h>
 #include <camera/ICameraRecordingProxyListener.h>
 #include <binder/IMemory.h>
 #include <binder/Parcel.h>
+#include <media/hardware/HardwareAPI.h>
 #include <stdint.h>
 #include <utils/Log.h>
 
@@ -29,13 +31,9 @@
     START_RECORDING = IBinder::FIRST_CALL_TRANSACTION,
     STOP_RECORDING,
     RELEASE_RECORDING_FRAME,
+    RELEASE_RECORDING_FRAME_HANDLE,
 };
 
-uint8_t ICameraRecordingProxy::baseObject = 0;
-
-size_t ICameraRecordingProxy::getCommonBaseAddress() {
-    return (size_t)&baseObject;
-}
 
 class BpCameraRecordingProxy: public BpInterface<ICameraRecordingProxy>
 {
@@ -71,6 +69,19 @@
         data.writeStrongBinder(IInterface::asBinder(mem));
         remote()->transact(RELEASE_RECORDING_FRAME, data, &reply);
     }
+
+    void releaseRecordingFrameHandle(native_handle_t *handle) {
+        ALOGV("releaseRecordingFrameHandle");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
+        data.writeNativeHandle(handle);
+
+        remote()->transact(RELEASE_RECORDING_FRAME_HANDLE, data, &reply);
+
+        // Close the native handle because camera received a dup copy.
+        native_handle_close(handle);
+        native_handle_delete(handle);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(CameraRecordingProxy, "android.hardware.ICameraRecordingProxy");
@@ -102,7 +113,14 @@
             releaseRecordingFrame(mem);
             return NO_ERROR;
         } break;
+        case RELEASE_RECORDING_FRAME_HANDLE: {
+            ALOGV("RELEASE_RECORDING_FRAME_HANDLE");
+            CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
 
+            // releaseRecordingFrameHandle will be responsible for closing the native handle.
+            releaseRecordingFrameHandle(data.readNativeHandle());
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
@@ -111,3 +129,4 @@
 // ----------------------------------------------------------------------------
 
 }; // namespace android
+
diff --git a/camera/ICameraRecordingProxyListener.cpp b/camera/ICameraRecordingProxyListener.cpp
index cf848fc..fa4dfd8 100644
--- a/camera/ICameraRecordingProxyListener.cpp
+++ b/camera/ICameraRecordingProxyListener.cpp
@@ -16,15 +16,18 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ICameraRecordingProxyListener"
+#include <camera/CameraUtils.h>
 #include <camera/ICameraRecordingProxyListener.h>
 #include <binder/IMemory.h>
 #include <binder/Parcel.h>
+#include <media/hardware/HardwareAPI.h>
 #include <utils/Log.h>
 
 namespace android {
 
 enum {
     DATA_CALLBACK_TIMESTAMP = IBinder::FIRST_CALL_TRANSACTION,
+    RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP,
 };
 
 class BpCameraRecordingProxyListener: public BpInterface<ICameraRecordingProxyListener>
@@ -45,6 +48,20 @@
         data.writeStrongBinder(IInterface::asBinder(imageData));
         remote()->transact(DATA_CALLBACK_TIMESTAMP, data, &reply, IBinder::FLAG_ONEWAY);
     }
+
+    void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp, native_handle_t* handle) {
+        ALOGV("recordingFrameHandleCallbackTimestamp");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraRecordingProxyListener::getInterfaceDescriptor());
+        data.writeInt64(timestamp);
+        data.writeNativeHandle(handle);
+        remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP, data, &reply,
+                IBinder::FLAG_ONEWAY);
+
+        // The native handle is dupped in ICameraClient so we need to free it here.
+        native_handle_close(handle);
+        native_handle_delete(handle);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(CameraRecordingProxyListener, "android.hardware.ICameraRecordingProxyListener");
@@ -64,6 +81,26 @@
             dataCallbackTimestamp(timestamp, msgType, imageData);
             return NO_ERROR;
         } break;
+        case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP: {
+            ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP");
+            CHECK_INTERFACE(ICameraRecordingProxyListener, data, reply);
+            nsecs_t timestamp;
+            status_t res = data.readInt64(&timestamp);
+            if (res != OK) {
+                ALOGE("%s: Failed to read timestamp: %s (%d)", __FUNCTION__, strerror(-res), res);
+                return BAD_VALUE;
+            }
+
+            native_handle_t* handle = data.readNativeHandle();
+            if (handle == nullptr) {
+                ALOGE("%s: Received a null native handle", __FUNCTION__);
+                return BAD_VALUE;
+            }
+            // The native handle will be freed in
+            // BpCameraRecordingProxy::releaseRecordingFrameHandle.
+            recordingFrameHandleCallbackTimestamp(timestamp, handle);
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
deleted file mode 100644
index b359f57..0000000
--- a/camera/ICameraService.cpp
+++ /dev/null
@@ -1,534 +0,0 @@
-/*
-**
-** Copyright 2008, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#define LOG_TAG "BpCameraService"
-#include <utils/Log.h>
-#include <utils/Errors.h>
-#include <utils/String16.h>
-
-#include <inttypes.h>
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-
-#include <camera/ICameraService.h>
-#include <camera/ICameraServiceListener.h>
-#include <camera/ICamera.h>
-#include <camera/ICameraClient.h>
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
-#include <camera/CameraMetadata.h>
-#include <camera/VendorTagDescriptor.h>
-
-namespace android {
-
-namespace {
-
-enum {
-    EX_SECURITY = -1,
-    EX_BAD_PARCELABLE = -2,
-    EX_ILLEGAL_ARGUMENT = -3,
-    EX_NULL_POINTER = -4,
-    EX_ILLEGAL_STATE = -5,
-    EX_HAS_REPLY_HEADER = -128,  // special; see below
-};
-
-static bool readExceptionCode(Parcel& reply) {
-    int32_t exceptionCode = reply.readExceptionCode();
-
-    if (exceptionCode != 0) {
-        const char* errorMsg;
-        switch(exceptionCode) {
-            case EX_SECURITY:
-                errorMsg = "Security";
-                break;
-            case EX_BAD_PARCELABLE:
-                errorMsg = "BadParcelable";
-                break;
-            case EX_NULL_POINTER:
-                errorMsg = "NullPointer";
-                break;
-            case EX_ILLEGAL_STATE:
-                errorMsg = "IllegalState";
-                break;
-            // Binder should be handling this code inside Parcel::readException
-            // but lets have a to-string here anyway just in case.
-            case EX_HAS_REPLY_HEADER:
-                errorMsg = "HasReplyHeader";
-                break;
-            default:
-                errorMsg = "Unknown";
-        }
-
-        ALOGE("Binder transmission error %s (%d)", errorMsg, exceptionCode);
-        return true;
-    }
-
-    return false;
-}
-
-};
-
-class BpCameraService: public BpInterface<ICameraService>
-{
-public:
-    BpCameraService(const sp<IBinder>& impl)
-        : BpInterface<ICameraService>(impl)
-    {
-    }
-
-    // get number of cameras available that support standard camera operations
-    virtual int32_t getNumberOfCameras()
-    {
-        return getNumberOfCameras(CAMERA_TYPE_BACKWARD_COMPATIBLE);
-    }
-
-    // get number of cameras available of a given type
-    virtual int32_t getNumberOfCameras(int type)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeInt32(type);
-        remote()->transact(BnCameraService::GET_NUMBER_OF_CAMERAS, data, &reply);
-
-        if (readExceptionCode(reply)) return 0;
-        return reply.readInt32();
-    }
-
-    // get information about a camera
-    virtual status_t getCameraInfo(int cameraId,
-                                   struct CameraInfo* cameraInfo) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeInt32(cameraId);
-        remote()->transact(BnCameraService::GET_CAMERA_INFO, data, &reply);
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        status_t result = reply.readInt32();
-        if (reply.readInt32() != 0) {
-            cameraInfo->facing = reply.readInt32();
-            cameraInfo->orientation = reply.readInt32();
-        }
-        return result;
-    }
-
-    // get camera characteristics (static metadata)
-    virtual status_t getCameraCharacteristics(int cameraId,
-                                              CameraMetadata* cameraInfo) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeInt32(cameraId);
-        remote()->transact(BnCameraService::GET_CAMERA_CHARACTERISTICS, data, &reply);
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        status_t result = reply.readInt32();
-
-        CameraMetadata out;
-        if (reply.readInt32() != 0) {
-            out.readFromParcel(&reply);
-        }
-
-        if (cameraInfo != NULL) {
-            cameraInfo->swap(out);
-        }
-
-        return result;
-    }
-
-    // Get enumeration and description of vendor tags for camera
-    virtual status_t getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        remote()->transact(BnCameraService::GET_CAMERA_VENDOR_TAG_DESCRIPTOR, data, &reply);
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        status_t result = reply.readInt32();
-
-        if (reply.readInt32() != 0) {
-            sp<VendorTagDescriptor> d;
-            if (VendorTagDescriptor::createFromParcel(&reply, /*out*/d) == OK) {
-                desc = d;
-            }
-        }
-        return result;
-    }
-
-    // connect to camera service (android.hardware.Camera)
-    virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
-                             const String16 &clientPackageName, int clientUid,
-                             /*out*/
-                             sp<ICamera>& device)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(cameraClient));
-        data.writeInt32(cameraId);
-        data.writeString16(clientPackageName);
-        data.writeInt32(clientUid);
-
-        status_t status;
-        status = remote()->transact(BnCameraService::CONNECT, data, &reply);
-        if (status != OK) return status;
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        status = reply.readInt32();
-        if (reply.readInt32() != 0) {
-            device = interface_cast<ICamera>(reply.readStrongBinder());
-        }
-        return status;
-    }
-
-    // connect to camera service (android.hardware.Camera)
-    virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId,
-                             int halVersion,
-                             const String16 &clientPackageName, int clientUid,
-                             /*out*/sp<ICamera>& device)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(cameraClient));
-        data.writeInt32(cameraId);
-        data.writeInt32(halVersion);
-        data.writeString16(clientPackageName);
-        data.writeInt32(clientUid);
-
-        status_t status;
-        status = remote()->transact(BnCameraService::CONNECT_LEGACY, data, &reply);
-        if (status != OK) return status;
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        status = reply.readInt32();
-        if (reply.readInt32() != 0) {
-            device = interface_cast<ICamera>(reply.readStrongBinder());
-        }
-        return status;
-    }
-
-    virtual status_t setTorchMode(const String16& cameraId, bool enabled,
-            const sp<IBinder>& clientBinder)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeString16(cameraId);
-        data.writeInt32(enabled ? 1 : 0);
-        data.writeStrongBinder(clientBinder);
-        remote()->transact(BnCameraService::SET_TORCH_MODE, data, &reply);
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        return reply.readInt32();
-    }
-
-    // connect to camera service (android.hardware.camera2.CameraDevice)
-    virtual status_t connectDevice(
-            const sp<ICameraDeviceCallbacks>& cameraCb,
-            int cameraId,
-            const String16& clientPackageName,
-            int clientUid,
-            /*out*/
-            sp<ICameraDeviceUser>& device)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(cameraCb));
-        data.writeInt32(cameraId);
-        data.writeString16(clientPackageName);
-        data.writeInt32(clientUid);
-
-        status_t status;
-        status = remote()->transact(BnCameraService::CONNECT_DEVICE, data, &reply);
-        if (status != OK) return status;
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        status = reply.readInt32();
-        if (reply.readInt32() != 0) {
-            device = interface_cast<ICameraDeviceUser>(reply.readStrongBinder());
-        }
-        return status;
-    }
-
-    virtual status_t addListener(const sp<ICameraServiceListener>& listener)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(listener));
-        remote()->transact(BnCameraService::ADD_LISTENER, data, &reply);
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        return reply.readInt32();
-    }
-
-    virtual status_t removeListener(const sp<ICameraServiceListener>& listener)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(listener));
-        remote()->transact(BnCameraService::REMOVE_LISTENER, data, &reply);
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        return reply.readInt32();
-    }
-
-    virtual status_t getLegacyParameters(int cameraId, String16* parameters) {
-        if (parameters == NULL) {
-            ALOGE("%s: parameters must not be null", __FUNCTION__);
-            return BAD_VALUE;
-        }
-
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-
-        data.writeInt32(cameraId);
-        remote()->transact(BnCameraService::GET_LEGACY_PARAMETERS, data, &reply);
-        if (readExceptionCode(reply)) return -EPROTO;
-
-        status_t res = data.readInt32();
-        int32_t length = data.readInt32(); // -1 means null
-        if (length > 0) {
-            *parameters = data.readString16();
-        } else {
-            *parameters = String16();
-        }
-
-        return res;
-    }
-
-    virtual status_t supportsCameraApi(int cameraId, int apiVersion) {
-        Parcel data, reply;
-
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeInt32(cameraId);
-        data.writeInt32(apiVersion);
-        remote()->transact(BnCameraService::SUPPORTS_CAMERA_API, data, &reply);
-        if (readExceptionCode(reply)) return -EPROTO;
-
-        status_t res = data.readInt32();
-        return res;
-    }
-
-    virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t len) {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeInt32(eventId);
-        data.writeInt32Array(len, args);
-        remote()->transact(BnCameraService::NOTIFY_SYSTEM_EVENT, data, &reply,
-                IBinder::FLAG_ONEWAY);
-    }
-
-};
-
-IMPLEMENT_META_INTERFACE(CameraService, "android.hardware.ICameraService");
-
-// ----------------------------------------------------------------------
-
-status_t BnCameraService::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    switch(code) {
-        case GET_NUMBER_OF_CAMERAS: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            reply->writeNoException();
-            reply->writeInt32(getNumberOfCameras(data.readInt32()));
-            return NO_ERROR;
-        } break;
-        case GET_CAMERA_INFO: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            CameraInfo cameraInfo = CameraInfo();
-            memset(&cameraInfo, 0, sizeof(cameraInfo));
-            status_t result = getCameraInfo(data.readInt32(), &cameraInfo);
-            reply->writeNoException();
-            reply->writeInt32(result);
-
-            // Fake a parcelable object here
-            reply->writeInt32(1); // means the parcelable is included
-            reply->writeInt32(cameraInfo.facing);
-            reply->writeInt32(cameraInfo.orientation);
-            return NO_ERROR;
-        } break;
-        case GET_CAMERA_CHARACTERISTICS: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            CameraMetadata info;
-            status_t result = getCameraCharacteristics(data.readInt32(), &info);
-            reply->writeNoException();
-            reply->writeInt32(result);
-
-            // out-variables are after exception and return value
-            reply->writeInt32(1); // means the parcelable is included
-            info.writeToParcel(reply);
-            return NO_ERROR;
-        } break;
-        case GET_CAMERA_VENDOR_TAG_DESCRIPTOR: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            sp<VendorTagDescriptor> d;
-            status_t result = getCameraVendorTagDescriptor(d);
-            reply->writeNoException();
-            reply->writeInt32(result);
-
-            // out-variables are after exception and return value
-            if (d == NULL) {
-                reply->writeInt32(0);
-            } else {
-                reply->writeInt32(1); // means the parcelable is included
-                d->writeToParcel(reply);
-            }
-            return NO_ERROR;
-        } break;
-        case CONNECT: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            sp<ICameraClient> cameraClient =
-                    interface_cast<ICameraClient>(data.readStrongBinder());
-            int32_t cameraId = data.readInt32();
-            const String16 clientName = data.readString16();
-            int32_t clientUid = data.readInt32();
-            sp<ICamera> camera;
-            status_t status = connect(cameraClient, cameraId,
-                    clientName, clientUid, /*out*/camera);
-            reply->writeNoException();
-            reply->writeInt32(status);
-            if (camera != NULL) {
-                reply->writeInt32(1);
-                reply->writeStrongBinder(IInterface::asBinder(camera));
-            } else {
-                reply->writeInt32(0);
-            }
-            return NO_ERROR;
-        } break;
-        case CONNECT_DEVICE: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            sp<ICameraDeviceCallbacks> cameraClient =
-                interface_cast<ICameraDeviceCallbacks>(data.readStrongBinder());
-            int32_t cameraId = data.readInt32();
-            const String16 clientName = data.readString16();
-            int32_t clientUid = data.readInt32();
-            sp<ICameraDeviceUser> camera;
-            status_t status = connectDevice(cameraClient, cameraId,
-                    clientName, clientUid, /*out*/camera);
-            reply->writeNoException();
-            reply->writeInt32(status);
-            if (camera != NULL) {
-                reply->writeInt32(1);
-                reply->writeStrongBinder(IInterface::asBinder(camera));
-            } else {
-                reply->writeInt32(0);
-            }
-            return NO_ERROR;
-        } break;
-        case ADD_LISTENER: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            sp<ICameraServiceListener> listener =
-                interface_cast<ICameraServiceListener>(data.readStrongBinder());
-            reply->writeNoException();
-            reply->writeInt32(addListener(listener));
-            return NO_ERROR;
-        } break;
-        case REMOVE_LISTENER: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            sp<ICameraServiceListener> listener =
-                interface_cast<ICameraServiceListener>(data.readStrongBinder());
-            reply->writeNoException();
-            reply->writeInt32(removeListener(listener));
-            return NO_ERROR;
-        } break;
-        case GET_LEGACY_PARAMETERS: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            int cameraId = data.readInt32();
-            String16 parameters;
-
-            reply->writeNoException();
-            // return value
-            reply->writeInt32(getLegacyParameters(cameraId, &parameters));
-            // out parameters
-            reply->writeInt32(1); // parameters is always available
-            reply->writeString16(parameters);
-            return NO_ERROR;
-        } break;
-        case SUPPORTS_CAMERA_API: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            int cameraId = data.readInt32();
-            int apiVersion = data.readInt32();
-
-            reply->writeNoException();
-            // return value
-            reply->writeInt32(supportsCameraApi(cameraId, apiVersion));
-            return NO_ERROR;
-        } break;
-        case CONNECT_LEGACY: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            sp<ICameraClient> cameraClient =
-                    interface_cast<ICameraClient>(data.readStrongBinder());
-            int32_t cameraId = data.readInt32();
-            int32_t halVersion = data.readInt32();
-            const String16 clientName = data.readString16();
-            int32_t clientUid = data.readInt32();
-            sp<ICamera> camera;
-            status_t status = connectLegacy(cameraClient, cameraId, halVersion,
-                    clientName, clientUid, /*out*/camera);
-            reply->writeNoException();
-            reply->writeInt32(status);
-            if (camera != NULL) {
-                reply->writeInt32(1);
-                reply->writeStrongBinder(IInterface::asBinder(camera));
-            } else {
-                reply->writeInt32(0);
-            }
-            return NO_ERROR;
-        } break;
-        case SET_TORCH_MODE: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            String16 cameraId = data.readString16();
-            bool enabled = data.readInt32() != 0 ? true : false;
-            const sp<IBinder> clientBinder = data.readStrongBinder();
-            status_t status = setTorchMode(cameraId, enabled, clientBinder);
-            reply->writeNoException();
-            reply->writeInt32(status);
-            return NO_ERROR;
-        } break;
-        case NOTIFY_SYSTEM_EVENT: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            int32_t eventId = data.readInt32();
-            int32_t len = data.readInt32();
-            if (len < 0) {
-                ALOGE("%s: Received poorly formatted length in binder request: notifySystemEvent.",
-                        __FUNCTION__);
-                return FAILED_TRANSACTION;
-            }
-            if (len > 512) {
-                ALOGE("%s: Length %" PRIi32 " too long in binder request: notifySystemEvent.",
-                        __FUNCTION__, len);
-                return FAILED_TRANSACTION;
-            }
-            int32_t events[len];
-            memset(events, 0, sizeof(int32_t) * len);
-            status_t status = data.read(events, sizeof(int32_t) * len);
-            if (status != NO_ERROR) {
-                ALOGE("%s: Received poorly formatted binder request: notifySystemEvent.",
-                        __FUNCTION__);
-                return FAILED_TRANSACTION;
-            }
-            notifySystemEvent(eventId, events, len);
-            return NO_ERROR;
-        } break;
-        default:
-            return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/camera/ICameraServiceListener.cpp b/camera/ICameraServiceListener.cpp
deleted file mode 100644
index 0010325..0000000
--- a/camera/ICameraServiceListener.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-
-#include <camera/ICameraServiceListener.h>
-
-namespace android {
-
-namespace {
-    enum {
-        STATUS_CHANGED = IBinder::FIRST_CALL_TRANSACTION,
-        TORCH_STATUS_CHANGED,
-    };
-}; // namespace anonymous
-
-class BpCameraServiceListener: public BpInterface<ICameraServiceListener>
-{
-
-public:
-    BpCameraServiceListener(const sp<IBinder>& impl)
-        : BpInterface<ICameraServiceListener>(impl)
-    {
-    }
-
-    virtual void onStatusChanged(Status status, int32_t cameraId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraServiceListener::getInterfaceDescriptor());
-
-        data.writeInt32(static_cast<int32_t>(status));
-        data.writeInt32(cameraId);
-
-        remote()->transact(STATUS_CHANGED,
-                           data,
-                           &reply,
-                           IBinder::FLAG_ONEWAY);
-    }
-
-    virtual void onTorchStatusChanged(TorchStatus status, const String16 &cameraId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraServiceListener::getInterfaceDescriptor());
-
-        data.writeInt32(static_cast<int32_t>(status));
-        data.writeString16(cameraId);
-
-        remote()->transact(TORCH_STATUS_CHANGED,
-                           data,
-                           &reply,
-                           IBinder::FLAG_ONEWAY);
-    }
-};
-
-IMPLEMENT_META_INTERFACE(CameraServiceListener, "android.hardware.ICameraServiceListener");
-
-// ----------------------------------------------------------------------
-
-status_t BnCameraServiceListener::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
-        uint32_t flags) {
-    switch(code) {
-        case STATUS_CHANGED: {
-            CHECK_INTERFACE(ICameraServiceListener, data, reply);
-
-            Status status = static_cast<Status>(data.readInt32());
-            int32_t cameraId = data.readInt32();
-
-            onStatusChanged(status, cameraId);
-
-            return NO_ERROR;
-        } break;
-        case TORCH_STATUS_CHANGED: {
-            CHECK_INTERFACE(ICameraServiceListener, data, reply);
-
-            TorchStatus status = static_cast<TorchStatus>(data.readInt32());
-            String16 cameraId = data.readString16();
-
-            onTorchStatusChanged(status, cameraId);
-
-            return NO_ERROR;
-        } break;
-        default:
-            return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index dce313a..5538da9 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -46,7 +46,9 @@
 static Mutex sLock;
 static sp<VendorTagDescriptor> sGlobalVendorTagDescriptor;
 
-VendorTagDescriptor::VendorTagDescriptor() {}
+namespace hardware {
+namespace camera2 {
+namespace params {
 
 VendorTagDescriptor::~VendorTagDescriptor() {
     size_t len = mReverseMapping.size();
@@ -55,90 +57,46 @@
     }
 }
 
-status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
-            /*out*/
-            sp<VendorTagDescriptor>& descriptor) {
-    if (vOps == NULL) {
-        ALOGE("%s: vendor_tag_ops argument was NULL.", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    int tagCount = vOps->get_tag_count(vOps);
-    if (tagCount < 0 || tagCount > INT32_MAX) {
-        ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
-        return BAD_VALUE;
-    }
-
-    Vector<uint32_t> tagArray;
-    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
-            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
-
-    vOps->get_all_tags(vOps, /*out*/tagArray.editArray());
-
-    sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
-    desc->mTagCount = tagCount;
-
-    SortedVector<String8> sections;
-    KeyedVector<uint32_t, String8> tagToSectionMap;
-
-    for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
-        uint32_t tag = tagArray[i];
-        if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
-            ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
-            return BAD_VALUE;
-        }
-        const char *tagName = vOps->get_tag_name(vOps, tag);
-        if (tagName == NULL) {
-            ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
-            return BAD_VALUE;
-        }
-        desc->mTagToNameMap.add(tag, String8(tagName));
-        const char *sectionName = vOps->get_section_name(vOps, tag);
-        if (sectionName == NULL) {
-            ALOGE("%s: no section name defined for vendor tag %d.", __FUNCTION__, tag);
-            return BAD_VALUE;
-        }
-
-        String8 sectionString(sectionName);
-
-        sections.add(sectionString);
-        tagToSectionMap.add(tag, sectionString);
-
-        int tagType = vOps->get_tag_type(vOps, tag);
-        if (tagType < 0 || tagType >= NUM_TYPES) {
-            ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
-            return BAD_VALUE;
-        }
-        desc->mTagToTypeMap.add(tag, tagType);
-    }
-
-    desc->mSections = sections;
-
-    for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
-        uint32_t tag = tagArray[i];
-        String8 sectionString = tagToSectionMap.valueFor(tag);
-
-        // Set up tag to section index map
-        ssize_t index = sections.indexOf(sectionString);
-        LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
-        desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
-
-        // Set up reverse mapping
-        ssize_t reverseIndex = -1;
-        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
-            KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
-            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
-        }
-        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
-    }
-
-    descriptor = desc;
-    return OK;
+VendorTagDescriptor::VendorTagDescriptor() :
+        mTagCount(0),
+        mVendorOps() {
 }
 
-status_t VendorTagDescriptor::createFromParcel(const Parcel* parcel,
-            /*out*/
-            sp<VendorTagDescriptor>& descriptor) {
+VendorTagDescriptor::VendorTagDescriptor(const VendorTagDescriptor& src) {
+    copyFrom(src);
+}
+
+VendorTagDescriptor& VendorTagDescriptor::operator=(const VendorTagDescriptor& rhs) {
+    copyFrom(rhs);
+    return *this;
+}
+
+void VendorTagDescriptor::copyFrom(const VendorTagDescriptor& src) {
+    if (this == &src) return;
+
+    size_t len = mReverseMapping.size();
+    for (size_t i = 0; i < len; ++i) {
+        delete mReverseMapping[i];
+    }
+    mReverseMapping.clear();
+
+    len = src.mReverseMapping.size();
+    // Deep-copy the KeyedVectors inside mReverseMapping, since they are heap-allocated
+    for (size_t i = 0; i < len; ++i) {
+        KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+        *nameMapper = *(src.mReverseMapping.valueAt(i));
+        mReverseMapping.add(src.mReverseMapping.keyAt(i), nameMapper);
+    }
+    // Everything else is simple
+    mTagToNameMap = src.mTagToNameMap;
+    mTagToSectionMap = src.mTagToSectionMap;
+    mTagToTypeMap = src.mTagToTypeMap;
+    mSections = src.mSections;
+    mTagCount = src.mTagCount;
+    mVendorOps = src.mVendorOps;
+}
+
+status_t VendorTagDescriptor::readFromParcel(const Parcel* parcel) {
     status_t res = OK;
     if (parcel == NULL) {
         ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
@@ -156,8 +114,7 @@
         return BAD_VALUE;
     }
 
-    sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
-    desc->mTagCount = tagCount;
+    mTagCount = tagCount;
 
     uint32_t tag, sectionIndex;
     uint32_t maxSectionIndex = 0;
@@ -197,9 +154,9 @@
         maxSectionIndex = (maxSectionIndex >= sectionIndex) ? maxSectionIndex : sectionIndex;
 
         allTags.add(tag);
-        desc->mTagToNameMap.add(tag, tagName);
-        desc->mTagToSectionMap.add(tag, sectionIndex);
-        desc->mTagToTypeMap.add(tag, tagType);
+        mTagToNameMap.add(tag, tagName);
+        mTagToSectionMap.add(tag, sectionIndex);
+        mTagToTypeMap.add(tag, tagType);
     }
 
     if (res != OK) {
@@ -217,7 +174,7 @@
                     __FUNCTION__, sectionCount, (maxSectionIndex + 1));
             return BAD_VALUE;
         }
-        LOG_ALWAYS_FATAL_IF(desc->mSections.setCapacity(sectionCount) <= 0,
+        LOG_ALWAYS_FATAL_IF(mSections.setCapacity(sectionCount) <= 0,
                 "Vector capacity must be positive");
         for (size_t i = 0; i < sectionCount; ++i) {
             String8 sectionName = parcel->readString8();
@@ -226,7 +183,7 @@
                       __FUNCTION__, i);
                 return NOT_ENOUGH_DATA;
             }
-            desc->mSections.add(sectionName);
+            mSections.add(sectionName);
         }
     }
 
@@ -235,17 +192,16 @@
     // Set up reverse mapping
     for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
         uint32_t tag = allTags[i];
-        String8 sectionString = desc->mSections[desc->mTagToSectionMap.valueFor(tag)];
+        String8 sectionString = mSections[mTagToSectionMap.valueFor(tag)];
 
         ssize_t reverseIndex = -1;
-        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
+        if ((reverseIndex = mReverseMapping.indexOfKey(sectionString)) < 0) {
             KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
-            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
+            reverseIndex = mReverseMapping.add(sectionString, nameMapper);
         }
-        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
+        mReverseMapping[reverseIndex]->add(mTagToNameMap.valueFor(tag), tag);
     }
 
-    descriptor = desc;
     return res;
 }
 
@@ -377,6 +333,92 @@
 
 }
 
+} // namespace params
+} // namespace camera2
+} // namespace hardware
+
+
+status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
+            /*out*/
+            sp<VendorTagDescriptor>& descriptor) {
+    if (vOps == NULL) {
+        ALOGE("%s: vendor_tag_ops argument was NULL.", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    int tagCount = vOps->get_tag_count(vOps);
+    if (tagCount < 0 || tagCount > INT32_MAX) {
+        ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
+        return BAD_VALUE;
+    }
+
+    Vector<uint32_t> tagArray;
+    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
+            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
+
+    vOps->get_all_tags(vOps, /*out*/tagArray.editArray());
+
+    sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
+    desc->mTagCount = tagCount;
+
+    SortedVector<String8> sections;
+    KeyedVector<uint32_t, String8> tagToSectionMap;
+
+    for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
+        uint32_t tag = tagArray[i];
+        if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
+            ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
+            return BAD_VALUE;
+        }
+        const char *tagName = vOps->get_tag_name(vOps, tag);
+        if (tagName == NULL) {
+            ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
+            return BAD_VALUE;
+        }
+        desc->mTagToNameMap.add(tag, String8(tagName));
+        const char *sectionName = vOps->get_section_name(vOps, tag);
+        if (sectionName == NULL) {
+            ALOGE("%s: no section name defined for vendor tag %d.", __FUNCTION__, tag);
+            return BAD_VALUE;
+        }
+
+        String8 sectionString(sectionName);
+
+        sections.add(sectionString);
+        tagToSectionMap.add(tag, sectionString);
+
+        int tagType = vOps->get_tag_type(vOps, tag);
+        if (tagType < 0 || tagType >= NUM_TYPES) {
+            ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
+            return BAD_VALUE;
+        }
+        desc->mTagToTypeMap.add(tag, tagType);
+    }
+
+    desc->mSections = sections;
+
+    for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
+        uint32_t tag = tagArray[i];
+        String8 sectionString = tagToSectionMap.valueFor(tag);
+
+        // Set up tag to section index map
+        ssize_t index = sections.indexOf(sectionString);
+        LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
+        desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
+
+        // Set up reverse mapping
+        ssize_t reverseIndex = -1;
+        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
+            KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
+        }
+        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
+    }
+
+    descriptor = desc;
+    return OK;
+}
+
 status_t VendorTagDescriptor::setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc) {
     status_t res = OK;
     Mutex::Autolock al(sLock);
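
With createFromParcel gone, VendorTagDescriptor now deserializes in place through the
instance readFromParcel above, and the explicit copy operations deep-copy the
heap-allocated reverse-mapping tables. A minimal round-trip sketch (the matching
writeToParcel is assumed from the unchanged remainder of this file):

    #include <binder/Parcel.h>
    #include <camera/VendorTagDescriptor.h>

    using android::Parcel;
    using android::hardware::camera2::params::VendorTagDescriptor;

    void roundTrip(const VendorTagDescriptor& src) {
        Parcel parcel;
        src.writeToParcel(&parcel);     // serialize (writeToParcel assumed)
        parcel.setDataPosition(0);      // rewind before reading back

        VendorTagDescriptor copy;
        copy.readFromParcel(&parcel);   // deserialize into an existing instance
    }
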
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/camera/aidl/android/hardware/CameraInfo.aidl
similarity index 69%
copy from services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
copy to camera/aidl/android/hardware/CameraInfo.aidl
index 9efeaba..c6a3a61 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ b/camera/aidl/android/hardware/CameraInfo.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,7 @@
  * limitations under the License.
  */
 
-#include "ZslProcessorInterface.h"
+package android.hardware;
 
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
-    return OK;
-}
-
-}; //namespace camera2
-}; //namespace android
-
+/** @hide */
+parcelable CameraInfo cpp_header "camera/CameraBase.h";
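
The cpp_header form declares a parcelable whose C++ implementation is hand-written
rather than generated: the AIDL compiler only records that the type in the named
header satisfies the libbinder Parcelable contract. A sketch of that contract (the
member list of the real CameraInfo is omitted; only the two required overrides are
shown):

    #include <binder/Parcelable.h>

    namespace android {
    namespace hardware {

    class CameraInfo : public Parcelable {
    public:
        // Required by android::Parcelable (implementation location assumed).
        virtual status_t writeToParcel(Parcel* parcel) const override;
        virtual status_t readFromParcel(const Parcel* parcel) override;
    };

    } // namespace hardware
    } // namespace android
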
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/camera/aidl/android/hardware/ICamera.aidl
similarity index 65%
copy from services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
copy to camera/aidl/android/hardware/ICamera.aidl
index 9efeaba..f9db842 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ b/camera/aidl/android/hardware/ICamera.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,15 @@
  * limitations under the License.
  */
 
-#include "ZslProcessorInterface.h"
+package android.hardware;
 
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
-    return OK;
+/** @hide */
+interface ICamera
+{
+    /**
+     * Only one call is exposed, for ICameraService testing purposes.
+     *
+     * Keep up-to-date with frameworks/av/include/camera/ICamera.h
+     */
+    void disconnect();
 }
-
-}; //namespace camera2
-}; //namespace android
-
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/camera/aidl/android/hardware/ICameraClient.aidl
similarity index 69%
copy from services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
copy to camera/aidl/android/hardware/ICameraClient.aidl
index 9efeaba..808edee 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ b/camera/aidl/android/hardware/ICameraClient.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,10 @@
  * limitations under the License.
  */
 
-#include "ZslProcessorInterface.h"
+package android.hardware;
 
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
-    return OK;
+/** @hide */
+interface ICameraClient
+{
+    // For now, empty because there is a manual implementation
 }
-
-}; //namespace camera2
-}; //namespace android
-
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
new file mode 100644
index 0000000..e94fd0c
--- /dev/null
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+import android.hardware.ICamera;
+import android.hardware.ICameraClient;
+import android.hardware.camera2.ICameraDeviceUser;
+import android.hardware.camera2.ICameraDeviceCallbacks;
+import android.hardware.camera2.params.VendorTagDescriptor;
+import android.hardware.camera2.impl.CameraMetadataNative;
+import android.hardware.ICameraServiceListener;
+import android.hardware.CameraInfo;
+
+/**
+ * Binder interface for the native camera service running in mediaserver.
+ *
+ * @hide
+ */
+interface ICameraService
+{
+    /**
+     * All camera service and device Binder calls may return a
+     * ServiceSpecificException with the following error codes
+     */
+    const int ERROR_PERMISSION_DENIED = 1;
+    const int ERROR_ALREADY_EXISTS = 2;
+    const int ERROR_ILLEGAL_ARGUMENT = 3;
+    const int ERROR_DISCONNECTED = 4;
+    const int ERROR_TIMED_OUT = 5;
+    const int ERROR_DISABLED = 6;
+    const int ERROR_CAMERA_IN_USE = 7;
+    const int ERROR_MAX_CAMERAS_IN_USE = 8;
+    const int ERROR_DEPRECATED_HAL = 9;
+    const int ERROR_INVALID_OPERATION = 10;
+
+    /**
+     * Types for getNumberOfCameras
+     */
+    const int CAMERA_TYPE_BACKWARD_COMPATIBLE = 0;
+    const int CAMERA_TYPE_ALL = 1;
+
+    /**
+     * Return the number of camera devices available in the system
+     */
+    int getNumberOfCameras(int type);
+
+    /**
+     * Fetch basic camera information for a camera device
+     */
+    CameraInfo getCameraInfo(int cameraId);
+
+    /**
+     * Default UID/PID values for non-privileged callers of
+     * connect(), connectDevice(), and connectLegacy()
+     */
+    const int USE_CALLING_UID = -1;
+    const int USE_CALLING_PID = -1;
+
+    /**
+     * Open a camera device through the old camera API
+     */
+    ICamera connect(ICameraClient client,
+            int cameraId,
+            String opPackageName,
+            int clientUid, int clientPid);
+
+    /**
+     * Open a camera device through the new camera API
+     * Only supported for device HAL versions >= 3.2
+     */
+    ICameraDeviceUser connectDevice(ICameraDeviceCallbacks callbacks,
+            int cameraId,
+            String opPackageName,
+            int clientUid);
+
+    /**
+     * halVersion constant for connectLegacy
+     */
+    const int CAMERA_HAL_API_VERSION_UNSPECIFIED = -1;
+
+    /**
+     * Open a camera device in legacy mode, if supported by the camera module HAL.
+     */
+    ICamera connectLegacy(ICameraClient client,
+            int cameraId,
+            int halVersion,
+            String opPackageName,
+            int clientUid);
+
+    /**
+     * Add/remove listeners for changes to camera device and flashlight state
+     */
+    void addListener(ICameraServiceListener listener);
+    void removeListener(ICameraServiceListener listener);
+
+    /**
+     * Read the static camera metadata for a camera device.
+     * Only supported for device HAL versions >= 3.2
+     */
+    CameraMetadataNative getCameraCharacteristics(int cameraId);
+
+    /**
+     * Read in the vendor tag descriptors from the camera module HAL.
+     * Intended to be used by the native code of CameraMetadataNative to correctly
+     * interpret camera metadata with vendor tags.
+     */
+    VendorTagDescriptor getCameraVendorTagDescriptor();
+
+    /**
+     * Read the legacy camera1 parameters into a String
+     */
+    String getLegacyParameters(int cameraId);
+
+    /**
+     * apiVersion constants for supportsCameraApi
+     */
+    const int API_VERSION_1 = 1;
+    const int API_VERSION_2 = 2;
+
+    // Determines if a particular API version is supported directly
+    boolean supportsCameraApi(int cameraId, int apiVersion);
+
+    void setTorchMode(String cameraId, boolean enabled, IBinder clientBinder);
+
+    /**
+     * Notify the camera service of a system event.  Should only be called from system_server.
+     *
+     * Callers require the android.permission.CAMERA_SEND_SYSTEM_EVENTS permission.
+     */
+    const int EVENT_NONE = 0;
+    const int EVENT_USER_SWITCHED = 1;
+    oneway void notifySystemEvent(int eventId, in int[] args);
+}
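
On the native side this generates android::hardware::ICameraService plus Bp/Bn
classes, and every method returns a binder::Status that can carry the
ServiceSpecificException codes above. A rough client sketch; the "media.camera"
service name matches what the framework uses elsewhere, but the exact generated
signatures should be treated as assumptions:

    #define LOG_TAG "CameraServiceSketch"
    #include <utils/Log.h>
    #include <binder/IServiceManager.h>
    #include <android/hardware/ICameraService.h>

    using namespace android;

    void queryCameraCount() {
        sp<hardware::ICameraService> cs = interface_cast<hardware::ICameraService>(
                defaultServiceManager()->getService(String16("media.camera")));
        if (cs == nullptr) return;

        int32_t numCameras = 0;
        binder::Status res = cs->getNumberOfCameras(
                hardware::ICameraService::CAMERA_TYPE_BACKWARD_COMPATIBLE, &numCameras);
        if (!res.isOk()) {
            // serviceSpecificErrorCode() maps to the ERROR_* constants above.
            ALOGE("getNumberOfCameras failed: %s", res.toString8().string());
        }
    }
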
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
new file mode 100644
index 0000000..4e2a8c7
--- /dev/null
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/** @hide */
+interface ICameraServiceListener
+{
+
+    /**
+     * Initial status will be transmitted with onStatusChanged immediately
+     * after this listener is added to the service listener list.
+     *
+     * Allowed transitions:
+     *
+     *     (Any)               -> NOT_PRESENT
+     *     NOT_PRESENT         -> PRESENT
+     *     NOT_PRESENT         -> ENUMERATING
+     *     ENUMERATING         -> PRESENT
+     *     PRESENT             -> NOT_AVAILABLE
+     *     NOT_AVAILABLE       -> PRESENT
+     *
+     * A state will never immediately transition back to itself.
+     *
+     * The enums must match the values in
+     * include/hardware/camera_common.h when applicable
+     */
+    // Device physically unplugged
+    const int STATUS_NOT_PRESENT      = 0;
+    // Device has been physically plugged in and the camera can be used exclusively
+    const int STATUS_PRESENT          = 1;
+    // Device has been physically plugged in, but it will not be connectable until
+    // enumeration is complete
+    const int STATUS_ENUMERATING      = 2;
+    // Camera is in use by another app and cannot be used exclusively
+    const int STATUS_NOT_AVAILABLE    = -2;
+
+    // Used to initialize variables only
+    const int STATUS_UNKNOWN          = -1;
+
+    oneway void onStatusChanged(int status, int cameraId);
+
+    /**
+     * The torch mode status of a camera.
+     *
+     * Initial status will be transmitted with onTorchStatusChanged immediately
+     * after this listener is added to the service listener list.
+     *
+     * The enums must match the values in
+     * include/hardware/camera_common.h
+     */
+    // The camera's torch mode is no longer available for use via
+    // setTorchMode().
+    const int TORCH_STATUS_NOT_AVAILABLE = 0;
+    // The camera's torch mode is off and available to be turned on via
+    // setTorchMode().
+    const int TORCH_STATUS_AVAILABLE_OFF = 1;
+    // The camera's torch mode is on and available to be turned off via
+    // setTorchMode().
+    const int TORCH_STATUS_AVAILABLE_ON  = 2;
+
+    // Used to initialize variables only
+    const int TORCH_STATUS_UNKNOWN = -1;
+
+    oneway void onTorchStatusChanged(int status, String cameraId);
+}
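
A native listener now derives from the generated Bn class instead of hand-rolling
onTransact as the deleted ICameraServiceListener.cpp above did; even oneway callbacks
return binder::Status in the generated C++ signatures. A sketch under standard
AIDL-cpp naming assumptions:

    #include <android/hardware/BnCameraServiceListener.h>

    using android::binder::Status;

    class AvailabilityListener : public android::hardware::BnCameraServiceListener {
    public:
        Status onStatusChanged(int32_t status, int32_t cameraId) override {
            // Compare status against STATUS_PRESENT / STATUS_NOT_AVAILABLE etc.
            return Status::ok();
        }
        Status onTorchStatusChanged(int32_t status,
                const android::String16& cameraId) override {
            return Status::ok();
        }
    };

    // Registered through ICameraService::addListener(new AvailabilityListener()).
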
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
new file mode 100644
index 0000000..0e654d5
--- /dev/null
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/**
+ * Binder interface for the camera service proxy running in system_server.
+ *
+ * Keep in sync with frameworks/av/include/camera/ICameraServiceProxy.h
+ *
+ * @hide
+ */
+interface ICameraServiceProxy
+{
+    /**
+     * Ping the service proxy to update the valid users for the camera service.
+     */
+    oneway void pingForUserUpdate();
+
+    /**
+     * Update the status of a camera device
+     */
+    oneway void notifyCameraState(String cameraId, int newCameraState);
+}
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/camera/aidl/android/hardware/camera2/CaptureRequest.aidl
similarity index 69%
copy from services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
copy to camera/aidl/android/hardware/camera2/CaptureRequest.aidl
index 9efeaba..9931fc7 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ b/camera/aidl/android/hardware/camera2/CaptureRequest.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,7 @@
  * limitations under the License.
  */
 
-#include "ZslProcessorInterface.h"
+package android.hardware.camera2;
 
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
-    return OK;
-}
-
-}; //namespace camera2
-}; //namespace android
-
+/** @hide */
+parcelable CaptureRequest cpp_header "camera/camera2/CaptureRequest.h";
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
new file mode 100644
index 0000000..755ec8e
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2;
+
+import android.hardware.camera2.impl.CameraMetadataNative;
+import android.hardware.camera2.impl.CaptureResultExtras;
+
+/** @hide */
+interface ICameraDeviceCallbacks
+{
+    // Error codes for onDeviceError
+    const int ERROR_CAMERA_INVALID_ERROR = -1; // To indicate all invalid error codes
+    const int ERROR_CAMERA_DISCONNECTED = 0;
+    const int ERROR_CAMERA_DEVICE = 1;
+    const int ERROR_CAMERA_SERVICE = 2;
+    const int ERROR_CAMERA_REQUEST = 3;
+    const int ERROR_CAMERA_RESULT = 4;
+    const int ERROR_CAMERA_BUFFER = 5;
+
+    oneway void onDeviceError(int errorCode, in CaptureResultExtras resultExtras);
+    oneway void onDeviceIdle();
+    oneway void onCaptureStarted(in CaptureResultExtras resultExtras, long timestamp);
+    oneway void onResultReceived(in CameraMetadataNative result,
+                                 in CaptureResultExtras resultExtras);
+    oneway void onPrepared(int streamId);
+
+    /**
+     * Repeating request encountered an error and was stopped.
+     *
+     * @param lastFrameNumber Frame number of the last frame of the streaming request.
+     */
+    oneway void onRepeatingRequestError(in long lastFrameNumber);
+}
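
The same pattern replaces the hand-written ICameraDeviceCallbacks.cpp deleted later
in this change: the generated stub unparcels CameraMetadataNative and
CaptureResultExtras itself, so the presence markers and readExceptionCode() calls go
away. A receiver sketch (class and parameter shapes are AIDL-cpp conventions, assumed
here):

    #include <android/hardware/camera2/BnCameraDeviceCallbacks.h>

    using android::binder::Status;
    using android::hardware::camera2::impl::CameraMetadataNative;
    using android::hardware::camera2::impl::CaptureResultExtras;

    class DeviceCallbacks : public android::hardware::camera2::BnCameraDeviceCallbacks {
    public:
        Status onDeviceError(int32_t errorCode,
                const CaptureResultExtras& extras) override { return Status::ok(); }
        Status onDeviceIdle() override { return Status::ok(); }
        Status onCaptureStarted(const CaptureResultExtras& extras,
                int64_t timestamp) override { return Status::ok(); }
        Status onResultReceived(const CameraMetadataNative& result,
                const CaptureResultExtras& extras) override { return Status::ok(); }
        Status onPrepared(int32_t streamId) override { return Status::ok(); }
        Status onRepeatingRequestError(int64_t lastFrameNumber) override {
            return Status::ok();
        }
    };
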
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
new file mode 100644
index 0000000..1e8744b
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2;
+
+import android.hardware.camera2.CaptureRequest;
+import android.hardware.camera2.impl.CameraMetadataNative;
+import android.hardware.camera2.params.OutputConfiguration;
+import android.hardware.camera2.utils.SubmitInfo;
+import android.view.Surface;
+
+/** @hide */
+interface ICameraDeviceUser
+{
+    void disconnect();
+
+    const int NO_IN_FLIGHT_REPEATING_FRAMES = -1;
+
+    SubmitInfo submitRequest(in CaptureRequest request, boolean streaming);
+    SubmitInfo submitRequestList(in CaptureRequest[] requestList, boolean streaming);
+
+    /**
+     * Cancel the repeating request specified by requestId.
+     * Returns the frame number of the last frame that will be produced from this
+     * repeating request, or NO_IN_FLIGHT_REPEATING_FRAMES if no frames were produced
+     * by this repeating request.
+     *
+     * A repeating request may be stopped by the camera device due to an error. Canceling
+     * an already-stopped repeating request will trigger ERROR_ILLEGAL_ARGUMENT.
+     */
+    long cancelRequest(int requestId);
+
+    /**
+     * Begin the device configuration.
+     *
+     * <p>
+     * beginConfigure must be called before any call to deleteStream, createStream,
+     * or endConfigure.  It is not valid to call this when the device is not idle.
+     * </p>
+     */
+    void beginConfigure();
+
+    /**
+     * End the device configuration.
+     *
+     * <p>
+     * endConfigure must be called after stream configuration is complete (i.e. after
+     * a call to beginConfigure and subsequent createStream/deleteStream calls).  This
+     * must be called before any requests can be submitted.
+     * </p>
+     */
+    void endConfigure(boolean isConstrainedHighSpeed);
+
+    void deleteStream(int streamId);
+
+    /**
+     * Create an output stream
+     *
+     * <p>Create an output stream based on the given output configuration</p>
+     *
+     * @param outputConfiguration size, format, and other parameters for the stream
+     * @return new stream ID
+     */
+    int createStream(in OutputConfiguration outputConfiguration);
+
+    /**
+     * Create an input stream
+     *
+     * <p>Create an input stream of width, height, and format</p>
+     *
+     * @param width Width of the input buffers
+     * @param height Height of the input buffers
+     * @param format Format of the input buffers. One of HAL_PIXEL_FORMAT_*.
+     *
+     * @return new stream ID
+     */
+    int createInputStream(int width, int height, int format);
+
+    /**
+     * Get the surface of the input stream.
+     *
+     * <p>It's valid to call this method only after a stream configuration has completed
+     * successfully and the configuration includes an input stream.</p>
+     *
+     * @return The Surface of the input stream buffer queue.
+     */
+    Surface getInputSurface();
+
+    // Keep in sync with public API in
+    // frameworks/base/core/java/android/hardware/camera2/CameraDevice.java
+    const int TEMPLATE_PREVIEW = 1;
+    const int TEMPLATE_STILL_CAPTURE = 2;
+    const int TEMPLATE_RECORD = 3;
+    const int TEMPLATE_VIDEO_SNAPSHOT = 4;
+    const int TEMPLATE_ZERO_SHUTTER_LAG = 5;
+    const int TEMPLATE_MANUAL = 6;
+
+    CameraMetadataNative createDefaultRequest(int templateId);
+
+    CameraMetadataNative getCameraInfo();
+
+    void waitUntilIdle();
+
+    long flush();
+
+    void prepare(int streamId);
+
+    void tearDown(int streamId);
+
+    void prepare2(int maxCount, int streamId);
+}
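
Reading the comments above as a protocol, a complete session against the generated
proxy looks roughly like the sketch below (signatures and the SubmitInfo field name
follow AIDL-cpp conventions and are assumptions; error handling elided):

    #include <android/hardware/camera2/ICameraDeviceUser.h>

    using namespace android;
    using hardware::camera2::ICameraDeviceUser;

    void configureAndCapture(const sp<ICameraDeviceUser>& device,
            const hardware::camera2::params::OutputConfiguration& config,
            const hardware::camera2::CaptureRequest& request) {
        // 1. All stream create/delete calls sit between beginConfigure/endConfigure.
        device->beginConfigure();
        int32_t streamId = -1;
        device->createStream(config, &streamId);
        device->endConfigure(/*isConstrainedHighSpeed*/ false);

        // 2. Start a repeating request; SubmitInfo bundles what the deleted
        //    C++ transport returned as separate status/requestId/frameNumber reads.
        hardware::camera2::utils::SubmitInfo info;
        device->submitRequest(request, /*streaming*/ true, &info);

        // 3. Stop the repeating request and drain the device.
        int64_t lastFrame = ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
        device->cancelRequest(info.mRequestId, &lastFrame);  // field name assumed
        device->waitUntilIdle();
    }
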
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/camera/aidl/android/hardware/camera2/impl/CameraMetadataNative.aidl
similarity index 69%
copy from services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
copy to camera/aidl/android/hardware/camera2/impl/CameraMetadataNative.aidl
index 9efeaba..507f575 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ b/camera/aidl/android/hardware/camera2/impl/CameraMetadataNative.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,7 @@
  * limitations under the License.
  */
 
-#include "ZslProcessorInterface.h"
+package android.hardware.camera2.impl;
 
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
-    return OK;
-}
-
-}; //namespace camera2
-}; //namespace android
-
+/** @hide */
+parcelable CameraMetadataNative cpp_header "camera/CameraMetadata.h";
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/camera/aidl/android/hardware/camera2/impl/CaptureResultExtras.aidl
similarity index 76%
rename from services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
rename to camera/aidl/android/hardware/camera2/impl/CaptureResultExtras.aidl
index 9efeaba..5f47eda 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ b/camera/aidl/android/hardware/camera2/impl/CaptureResultExtras.aidl
@@ -14,15 +14,7 @@
  * limitations under the License.
  */
 
-#include "ZslProcessorInterface.h"
+package android.hardware.camera2.impl;
 
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
-    return OK;
-}
-
-}; //namespace camera2
-}; //namespace android
-
+/** @hide */
+parcelable CaptureResultExtras cpp_header "camera/CaptureResult.h";
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/camera/aidl/android/hardware/camera2/params/OutputConfiguration.aidl
similarity index 69%
copy from services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
copy to camera/aidl/android/hardware/camera2/params/OutputConfiguration.aidl
index 9efeaba..a8ad832 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ b/camera/aidl/android/hardware/camera2/params/OutputConfiguration.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2015 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,7 @@
  * limitations under the License.
  */
 
-#include "ZslProcessorInterface.h"
+package android.hardware.camera2.params;
 
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
-    return OK;
-}
-
-}; //namespace camera2
-}; //namespace android
-
+/** @hide */
+parcelable OutputConfiguration cpp_header "camera/camera2/OutputConfiguration.h";
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptor.aidl
similarity index 69%
copy from services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
copy to camera/aidl/android/hardware/camera2/params/VendorTagDescriptor.aidl
index 9efeaba..9ee4263 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptor.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,7 @@
  * limitations under the License.
  */
 
-#include "ZslProcessorInterface.h"
+package android.hardware.camera2.params;
 
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
-    return OK;
-}
-
-}; //namespace camera2
-}; //namespace android
-
+/** @hide */
+parcelable VendorTagDescriptor cpp_header "camera/VendorTagDescriptor.h";
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp b/camera/aidl/android/hardware/camera2/utils/SubmitInfo.aidl
similarity index 69%
copy from services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
copy to camera/aidl/android/hardware/camera2/utils/SubmitInfo.aidl
index 9efeaba..57531ad 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.cpp
+++ b/camera/aidl/android/hardware/camera2/utils/SubmitInfo.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2015 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,7 @@
  * limitations under the License.
  */
 
-#include "ZslProcessorInterface.h"
+package android.hardware.camera2.utils;
 
-namespace android {
-namespace camera2 {
-
-status_t ZslProcessorInterface::disconnect() {
-    return OK;
-}
-
-}; //namespace camera2
-}; //namespace android
-
+/** @hide */
+parcelable SubmitInfo cpp_header "camera/camera2/SubmitInfo.h";
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 4217bc6..fb43708 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -25,8 +25,10 @@
 #include <gui/Surface.h>
 
 namespace android {
+namespace hardware {
+namespace camera2 {
 
-status_t CaptureRequest::readFromParcel(Parcel* parcel) {
+status_t CaptureRequest::readFromParcel(const Parcel* parcel) {
     if (parcel == NULL) {
         ALOGE("%s: Null parcel", __FUNCTION__);
         return BAD_VALUE;
@@ -130,4 +132,6 @@
     return OK;
 }
 
-}; // namespace android
+} // namespace camera2
+} // namespace hardware
+} // namespace android
diff --git a/camera/camera2/ICameraDeviceCallbacks.cpp b/camera/camera2/ICameraDeviceCallbacks.cpp
deleted file mode 100644
index f599879..0000000
--- a/camera/camera2/ICameraDeviceCallbacks.cpp
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ICameraDeviceCallbacks"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-#include <utils/Mutex.h>
-
-#include <camera/camera2/ICameraDeviceCallbacks.h>
-#include "camera/CameraMetadata.h"
-#include "camera/CaptureResult.h"
-
-namespace android {
-
-enum {
-    CAMERA_ERROR = IBinder::FIRST_CALL_TRANSACTION,
-    CAMERA_IDLE,
-    CAPTURE_STARTED,
-    RESULT_RECEIVED,
-    PREPARED
-};
-
-class BpCameraDeviceCallbacks: public BpInterface<ICameraDeviceCallbacks>
-{
-public:
-    BpCameraDeviceCallbacks(const sp<IBinder>& impl)
-        : BpInterface<ICameraDeviceCallbacks>(impl)
-    {
-    }
-
-    void onDeviceError(CameraErrorCode errorCode, const CaptureResultExtras& resultExtras)
-    {
-        ALOGV("onDeviceError");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
-        data.writeInt32(static_cast<int32_t>(errorCode));
-        data.writeInt32(1); // to mark presence of CaptureResultExtras object
-        resultExtras.writeToParcel(&data);
-        remote()->transact(CAMERA_ERROR, data, &reply, IBinder::FLAG_ONEWAY);
-        data.writeNoException();
-    }
-
-    void onDeviceIdle()
-    {
-        ALOGV("onDeviceIdle");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
-        remote()->transact(CAMERA_IDLE, data, &reply, IBinder::FLAG_ONEWAY);
-        data.writeNoException();
-    }
-
-    void onCaptureStarted(const CaptureResultExtras& result, int64_t timestamp)
-    {
-        ALOGV("onCaptureStarted");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
-        data.writeInt32(1); // to mark presence of CaptureResultExtras object
-        result.writeToParcel(&data);
-        data.writeInt64(timestamp);
-        remote()->transact(CAPTURE_STARTED, data, &reply, IBinder::FLAG_ONEWAY);
-        data.writeNoException();
-    }
-
-    void onResultReceived(const CameraMetadata& metadata,
-            const CaptureResultExtras& resultExtras) {
-        ALOGV("onResultReceived");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
-        data.writeInt32(1); // to mark presence of metadata object
-        metadata.writeToParcel(&data);
-        data.writeInt32(1); // to mark presence of CaptureResult object
-        resultExtras.writeToParcel(&data);
-        remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
-        data.writeNoException();
-    }
-
-    void onPrepared(int streamId)
-    {
-        ALOGV("onPrepared");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
-        data.writeInt32(streamId);
-        remote()->transact(PREPARED, data, &reply, IBinder::FLAG_ONEWAY);
-        data.writeNoException();
-    }
-
-};
-
-IMPLEMENT_META_INTERFACE(CameraDeviceCallbacks,
-                         "android.hardware.camera2.ICameraDeviceCallbacks");
-
-// ----------------------------------------------------------------------
-
-status_t BnCameraDeviceCallbacks::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    ALOGV("onTransact - code = %d", code);
-    switch(code) {
-        case CAMERA_ERROR: {
-            ALOGV("onDeviceError");
-            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
-            CameraErrorCode errorCode =
-                    static_cast<CameraErrorCode>(data.readInt32());
-            CaptureResultExtras resultExtras;
-            if (data.readInt32() != 0) {
-                resultExtras.readFromParcel(const_cast<Parcel*>(&data));
-            } else {
-                ALOGE("No CaptureResultExtras object is present!");
-            }
-            onDeviceError(errorCode, resultExtras);
-            data.readExceptionCode();
-            return NO_ERROR;
-        } break;
-        case CAMERA_IDLE: {
-            ALOGV("onDeviceIdle");
-            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
-            onDeviceIdle();
-            data.readExceptionCode();
-            return NO_ERROR;
-        } break;
-        case CAPTURE_STARTED: {
-            ALOGV("onCaptureStarted");
-            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
-            CaptureResultExtras result;
-            if (data.readInt32() != 0) {
-                result.readFromParcel(const_cast<Parcel*>(&data));
-            } else {
-                ALOGE("No CaptureResultExtras object is present in result!");
-            }
-            int64_t timestamp = data.readInt64();
-            onCaptureStarted(result, timestamp);
-            data.readExceptionCode();
-            return NO_ERROR;
-        } break;
-        case RESULT_RECEIVED: {
-            ALOGV("onResultReceived");
-            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
-            CameraMetadata metadata;
-            if (data.readInt32() != 0) {
-                metadata.readFromParcel(const_cast<Parcel*>(&data));
-            } else {
-                ALOGW("No metadata object is present in result");
-            }
-            CaptureResultExtras resultExtras;
-            if (data.readInt32() != 0) {
-                resultExtras.readFromParcel(const_cast<Parcel*>(&data));
-            } else {
-                ALOGW("No capture result extras object is present in result");
-            }
-            onResultReceived(metadata, resultExtras);
-            data.readExceptionCode();
-            return NO_ERROR;
-        } break;
-        case PREPARED: {
-            ALOGV("onPrepared");
-            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
-            CaptureResultExtras result;
-            int streamId = data.readInt32();
-            onPrepared(streamId);
-            data.readExceptionCode();
-            return NO_ERROR;
-        } break;
-        default:
-            return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
deleted file mode 100644
index 2a9fd2b..0000000
--- a/camera/camera2/ICameraDeviceUser.cpp
+++ /dev/null
@@ -1,626 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "ICameraDeviceUser"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <binder/Parcel.h>
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-#include <camera/CameraMetadata.h>
-#include <camera/camera2/CaptureRequest.h>
-#include <camera/camera2/OutputConfiguration.h>
-
-namespace android {
-
-typedef Parcel::WritableBlob WritableBlob;
-typedef Parcel::ReadableBlob ReadableBlob;
-
-enum {
-    DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
-    SUBMIT_REQUEST,
-    SUBMIT_REQUEST_LIST,
-    CANCEL_REQUEST,
-    BEGIN_CONFIGURE,
-    END_CONFIGURE,
-    DELETE_STREAM,
-    CREATE_STREAM,
-    CREATE_INPUT_STREAM,
-    GET_INPUT_SURFACE,
-    CREATE_DEFAULT_REQUEST,
-    GET_CAMERA_INFO,
-    WAIT_UNTIL_IDLE,
-    FLUSH,
-    PREPARE,
-    TEAR_DOWN,
-    PREPARE2
-};
-
-namespace {
-    // Read empty strings without printing a false error message.
-    String16 readMaybeEmptyString16(const Parcel& parcel) {
-        size_t len;
-        const char16_t* str = parcel.readString16Inplace(&len);
-        if (str != NULL) {
-            return String16(str, len);
-        } else {
-            return String16();
-        }
-    }
-};
-
-class BpCameraDeviceUser : public BpInterface<ICameraDeviceUser>
-{
-public:
-    BpCameraDeviceUser(const sp<IBinder>& impl)
-        : BpInterface<ICameraDeviceUser>(impl)
-    {
-    }
-
-    // disconnect from camera service
-    void disconnect()
-    {
-        ALOGV("disconnect");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        remote()->transact(DISCONNECT, data, &reply);
-        reply.readExceptionCode();
-    }
-
-    virtual int submitRequest(sp<CaptureRequest> request, bool repeating,
-                              int64_t *lastFrameNumber)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-
-        // arg0 = CaptureRequest
-        if (request != 0) {
-            data.writeInt32(1);
-            request->writeToParcel(&data);
-        } else {
-            data.writeInt32(0);
-        }
-
-        // arg1 = streaming (bool)
-        data.writeInt32(repeating);
-
-        remote()->transact(SUBMIT_REQUEST, data, &reply);
-
-        reply.readExceptionCode();
-        status_t res = reply.readInt32();
-
-        status_t resFrameNumber = BAD_VALUE;
-        if (reply.readInt32() != 0) {
-            if (lastFrameNumber != NULL) {
-                resFrameNumber = reply.readInt64(lastFrameNumber);
-            }
-        }
-
-        if (res < 0 || (resFrameNumber != NO_ERROR)) {
-            res = FAILED_TRANSACTION;
-        }
-        return res;
-    }
-
-    virtual int submitRequestList(List<sp<CaptureRequest> > requestList, bool repeating,
-                                  int64_t *lastFrameNumber)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-
-        data.writeInt32(requestList.size());
-
-        for (List<sp<CaptureRequest> >::iterator it = requestList.begin();
-                it != requestList.end(); ++it) {
-            sp<CaptureRequest> request = *it;
-            if (request != 0) {
-                data.writeInt32(1);
-                if (request->writeToParcel(&data) != OK) {
-                    return BAD_VALUE;
-                }
-            } else {
-                data.writeInt32(0);
-            }
-        }
-
-        data.writeInt32(repeating);
-
-        remote()->transact(SUBMIT_REQUEST_LIST, data, &reply);
-
-        reply.readExceptionCode();
-        status_t res = reply.readInt32();
-
-        status_t resFrameNumber = BAD_VALUE;
-        if (reply.readInt32() != 0) {
-            if (lastFrameNumber != NULL) {
-                resFrameNumber = reply.readInt64(lastFrameNumber);
-            }
-        }
-        if (res < 0 || (resFrameNumber != NO_ERROR)) {
-            res = FAILED_TRANSACTION;
-        }
-        return res;
-    }
-
-    virtual status_t cancelRequest(int requestId, int64_t *lastFrameNumber)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        data.writeInt32(requestId);
-
-        remote()->transact(CANCEL_REQUEST, data, &reply);
-
-        reply.readExceptionCode();
-        status_t res = reply.readInt32();
-
-        status_t resFrameNumber = BAD_VALUE;
-        if (reply.readInt32() != 0) {
-            if (lastFrameNumber != NULL) {
-                resFrameNumber = reply.readInt64(lastFrameNumber);
-            }
-        }
-        if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
-            res = FAILED_TRANSACTION;
-        }
-        return res;
-    }
-
-    virtual status_t beginConfigure()
-    {
-        ALOGV("beginConfigure");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        remote()->transact(BEGIN_CONFIGURE, data, &reply);
-        reply.readExceptionCode();
-        return reply.readInt32();
-    }
-
-    virtual status_t endConfigure(bool isConstrainedHighSpeed)
-    {
-        ALOGV("endConfigure");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        data.writeInt32(isConstrainedHighSpeed);
-
-        remote()->transact(END_CONFIGURE, data, &reply);
-        reply.readExceptionCode();
-        return reply.readInt32();
-    }
-
-    virtual status_t deleteStream(int streamId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        data.writeInt32(streamId);
-
-        remote()->transact(DELETE_STREAM, data, &reply);
-
-        reply.readExceptionCode();
-        return reply.readInt32();
-    }
-
-    virtual status_t createStream(const OutputConfiguration& outputConfiguration)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        if (outputConfiguration.getGraphicBufferProducer() != NULL) {
-            data.writeInt32(1); // marker that OutputConfiguration is not null. Mimic aidl behavior
-            outputConfiguration.writeToParcel(data);
-        } else {
-            data.writeInt32(0);
-        }
-        remote()->transact(CREATE_STREAM, data, &reply);
-
-        reply.readExceptionCode();
-        return reply.readInt32();
-    }
-
-    virtual status_t createInputStream(int width, int height, int format)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        data.writeInt32(width);
-        data.writeInt32(height);
-        data.writeInt32(format);
-
-        remote()->transact(CREATE_INPUT_STREAM, data, &reply);
-
-        reply.readExceptionCode();
-        return reply.readInt32();
-    }
-
-    // get the buffer producer of the input stream
-    virtual status_t getInputBufferProducer(
-            sp<IGraphicBufferProducer> *producer) {
-        if (producer == NULL) {
-            return BAD_VALUE;
-        }
-
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-
-        remote()->transact(GET_INPUT_SURFACE, data, &reply);
-
-        reply.readExceptionCode();
-        status_t result = reply.readInt32() ;
-        if (result != OK) {
-            return result;
-        }
-
-        sp<IGraphicBufferProducer> bp = NULL;
-        if (reply.readInt32() != 0) {
-            String16 name = readMaybeEmptyString16(reply);
-            bp = interface_cast<IGraphicBufferProducer>(
-                    reply.readStrongBinder());
-        }
-
-        *producer = bp;
-
-        return *producer == NULL ? INVALID_OPERATION : OK;
-    }
-
-    // Create a request object from a template.
-    virtual status_t createDefaultRequest(int templateId,
-                                          /*out*/
-                                          CameraMetadata* request)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        data.writeInt32(templateId);
-        remote()->transact(CREATE_DEFAULT_REQUEST, data, &reply);
-
-        reply.readExceptionCode();
-        status_t result = reply.readInt32();
-
-        CameraMetadata out;
-        if (reply.readInt32() != 0) {
-            out.readFromParcel(&reply);
-        }
-
-        if (request != NULL) {
-            request->swap(out);
-        }
-        return result;
-    }
-
-
-    virtual status_t getCameraInfo(CameraMetadata* info)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        remote()->transact(GET_CAMERA_INFO, data, &reply);
-
-        reply.readExceptionCode();
-        status_t result = reply.readInt32();
-
-        CameraMetadata out;
-        if (reply.readInt32() != 0) {
-            out.readFromParcel(&reply);
-        }
-
-        if (info != NULL) {
-            info->swap(out);
-        }
-
-        return result;
-    }
-
-    virtual status_t waitUntilIdle()
-    {
-        ALOGV("waitUntilIdle");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        remote()->transact(WAIT_UNTIL_IDLE, data, &reply);
-        reply.readExceptionCode();
-        return reply.readInt32();
-    }
-
-    virtual status_t flush(int64_t *lastFrameNumber)
-    {
-        ALOGV("flush");
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        remote()->transact(FLUSH, data, &reply);
-        reply.readExceptionCode();
-        status_t res = reply.readInt32();
-
-        status_t resFrameNumber = BAD_VALUE;
-        if (reply.readInt32() != 0) {
-            if (lastFrameNumber != NULL) {
-                resFrameNumber = reply.readInt64(lastFrameNumber);
-            }
-        }
-        if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
-            res = FAILED_TRANSACTION;
-        }
-        return res;
-    }
-
-    virtual status_t prepare(int streamId)
-    {
-        ALOGV("prepare");
-        Parcel data, reply;
-
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        data.writeInt32(streamId);
-
-        remote()->transact(PREPARE, data, &reply);
-
-        reply.readExceptionCode();
-        return reply.readInt32();
-    }
-
-    virtual status_t prepare2(int maxCount, int streamId)
-    {
-        ALOGV("prepare2");
-        Parcel data, reply;
-
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        data.writeInt32(maxCount);
-        data.writeInt32(streamId);
-
-        remote()->transact(PREPARE2, data, &reply);
-
-        reply.readExceptionCode();
-        return reply.readInt32();
-    }
-
-    virtual status_t tearDown(int streamId)
-    {
-        ALOGV("tearDown");
-        Parcel data, reply;
-
-        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-        data.writeInt32(streamId);
-
-        remote()->transact(TEAR_DOWN, data, &reply);
-
-        reply.readExceptionCode();
-        return reply.readInt32();
-    }
-
-private:
-
-
-};
-
-IMPLEMENT_META_INTERFACE(CameraDeviceUser,
-                         "android.hardware.camera2.ICameraDeviceUser");
-
-// ----------------------------------------------------------------------
-
-status_t BnCameraDeviceUser::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    switch(code) {
-        case DISCONNECT: {
-            ALOGV("DISCONNECT");
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            disconnect();
-            reply->writeNoException();
-            return NO_ERROR;
-        } break;
-        case SUBMIT_REQUEST: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
-            // arg0 = request
-            sp<CaptureRequest> request;
-            if (data.readInt32() != 0) {
-                request = new CaptureRequest();
-                request->readFromParcel(const_cast<Parcel*>(&data));
-            }
-
-            // arg1 = streaming (bool)
-            bool repeating = data.readInt32();
-
-            // return code: requestId (int32)
-            reply->writeNoException();
-            int64_t lastFrameNumber = -1;
-            reply->writeInt32(submitRequest(request, repeating, &lastFrameNumber));
-            reply->writeInt32(1);
-            reply->writeInt64(lastFrameNumber);
-
-            return NO_ERROR;
-        } break;
-        case SUBMIT_REQUEST_LIST: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
-            List<sp<CaptureRequest> > requestList;
-            int requestListSize = data.readInt32();
-            for (int i = 0; i < requestListSize; i++) {
-                if (data.readInt32() != 0) {
-                    sp<CaptureRequest> request = new CaptureRequest();
-                    if (request->readFromParcel(const_cast<Parcel*>(&data)) != OK) {
-                        return BAD_VALUE;
-                    }
-                    requestList.push_back(request);
-                } else {
-                    sp<CaptureRequest> request = 0;
-                    requestList.push_back(request);
-                    ALOGE("A request is missing. Sending in null request.");
-                }
-            }
-
-            bool repeating = data.readInt32();
-
-            reply->writeNoException();
-            int64_t lastFrameNumber = -1;
-            reply->writeInt32(submitRequestList(requestList, repeating, &lastFrameNumber));
-            reply->writeInt32(1);
-            reply->writeInt64(lastFrameNumber);
-
-            return NO_ERROR;
-        } break;
-        case CANCEL_REQUEST: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            int requestId = data.readInt32();
-            reply->writeNoException();
-            int64_t lastFrameNumber = -1;
-            reply->writeInt32(cancelRequest(requestId, &lastFrameNumber));
-            reply->writeInt32(1);
-            reply->writeInt64(lastFrameNumber);
-            return NO_ERROR;
-        } break;
-        case DELETE_STREAM: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            int streamId = data.readInt32();
-            reply->writeNoException();
-            reply->writeInt32(deleteStream(streamId));
-            return NO_ERROR;
-        } break;
-        case CREATE_STREAM: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
-            status_t ret = BAD_VALUE;
-            if (data.readInt32() != 0) {
-                OutputConfiguration outputConfiguration(data);
-                ret = createStream(outputConfiguration);
-            } else {
-                ALOGE("%s: cannot take an empty OutputConfiguration", __FUNCTION__);
-            }
-
-            reply->writeNoException();
-            ALOGV("%s: CREATE_STREAM: write noException", __FUNCTION__);
-            reply->writeInt32(ret);
-            ALOGV("%s: CREATE_STREAM: write ret = %d", __FUNCTION__, ret);
-
-            return NO_ERROR;
-        } break;
-        case CREATE_INPUT_STREAM: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            int width, height, format;
-
-            width = data.readInt32();
-            height = data.readInt32();
-            format = data.readInt32();
-            status_t ret = createInputStream(width, height, format);
-
-            reply->writeNoException();
-            reply->writeInt32(ret);
-            return NO_ERROR;
-
-        } break;
-        case GET_INPUT_SURFACE: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
-            sp<IGraphicBufferProducer> bp;
-            status_t ret = getInputBufferProducer(&bp);
-            sp<IBinder> b(IInterface::asBinder(ret == OK ? bp : NULL));
-
-            reply->writeNoException();
-            reply->writeInt32(ret);
-            reply->writeInt32(1);
-            reply->writeString16(String16("camera input")); // name of surface
-            reply->writeStrongBinder(b);
-
-            return NO_ERROR;
-        } break;
-        case CREATE_DEFAULT_REQUEST: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
-            int templateId = data.readInt32();
-
-            CameraMetadata request;
-            status_t ret;
-            ret = createDefaultRequest(templateId, &request);
-
-            reply->writeNoException();
-            reply->writeInt32(ret);
-
-            // out-variables are after exception and return value
-            reply->writeInt32(1); // to mark presence of metadata object
-            request.writeToParcel(const_cast<Parcel*>(reply));
-
-            return NO_ERROR;
-        } break;
-        case GET_CAMERA_INFO: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
-            CameraMetadata info;
-            status_t ret;
-            ret = getCameraInfo(&info);
-
-            reply->writeNoException();
-            reply->writeInt32(ret);
-
-            // out-variables are after exception and return value
-            reply->writeInt32(1); // to mark presence of metadata object
-            info.writeToParcel(reply);
-
-            return NO_ERROR;
-        } break;
-        case WAIT_UNTIL_IDLE: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            reply->writeNoException();
-            reply->writeInt32(waitUntilIdle());
-            return NO_ERROR;
-        } break;
-        case FLUSH: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            reply->writeNoException();
-            int64_t lastFrameNumber = -1;
-            reply->writeInt32(flush(&lastFrameNumber));
-            reply->writeInt32(1);
-            reply->writeInt64(lastFrameNumber);
-            return NO_ERROR;
-        }
-        case BEGIN_CONFIGURE: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            reply->writeNoException();
-            reply->writeInt32(beginConfigure());
-            return NO_ERROR;
-        } break;
-        case END_CONFIGURE: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            bool isConstrainedHighSpeed = data.readInt32();
-            reply->writeNoException();
-            reply->writeInt32(endConfigure(isConstrainedHighSpeed));
-            return NO_ERROR;
-        } break;
-        case PREPARE: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            int streamId = data.readInt32();
-            reply->writeNoException();
-            reply->writeInt32(prepare(streamId));
-            return NO_ERROR;
-        } break;
-        case TEAR_DOWN: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            int streamId = data.readInt32();
-            reply->writeNoException();
-            reply->writeInt32(tearDown(streamId));
-            return NO_ERROR;
-        } break;
-        case PREPARE2: {
-            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            int maxCount = data.readInt32();
-            int streamId = data.readInt32();
-            reply->writeNoException();
-            reply->writeInt32(prepare2(maxCount, streamId));
-            return NO_ERROR;
-        } break;
-        default:
-            return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
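For orientation, a minimal sketch of how callers interact with the AIDL-generated replacement for this hand-written proxy. It assumes the aidl-cpp convention of methods that take out-pointers and return binder::Status (visible in ACameraDevice.cpp further down); the variable names are illustrative:

    // Generated proxy: no manual Parcel packing; errors come back as binder::Status.
    CameraMetadata rawRequest;
    binder::Status res = device->createDefaultRequest(templateId, &rawRequest);
    if (!res.isOk()) {
        ALOGE("createDefaultRequest failed: %s", res.toString8().string());
    }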
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 20a23e0..3247d0d 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -16,26 +16,19 @@
 */
 
 #define LOG_TAG "OutputConfiguration"
+//#define LOG_NDEBUG 0
+
 #include <utils/Log.h>
 
 #include <camera/camera2/OutputConfiguration.h>
+#include <gui/Surface.h>
 #include <binder/Parcel.h>
 
 namespace android {
 
 
 const int OutputConfiguration::INVALID_ROTATION = -1;
-
-// Read empty strings without printing a false error message.
-String16 OutputConfiguration::readMaybeEmptyString16(const Parcel& parcel) {
-    size_t len;
-    const char16_t* str = parcel.readString16Inplace(&len);
-    if (str != NULL) {
-        return String16(str, len);
-    } else {
-        return String16();
-    }
-}
+const int OutputConfiguration::INVALID_SET_ID = -1;
 
 sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
     return mGbp;
@@ -45,40 +38,80 @@
     return mRotation;
 }
 
-OutputConfiguration::OutputConfiguration(const Parcel& parcel) {
-    status_t err;
+int OutputConfiguration::getSurfaceSetID() const {
+    return mSurfaceSetID;
+}
+
+OutputConfiguration::OutputConfiguration() :
+        mRotation(INVALID_ROTATION),
+        mSurfaceSetID(INVALID_SET_ID) {
+}
+
+OutputConfiguration::OutputConfiguration(const Parcel& parcel) :
+        mRotation(INVALID_ROTATION),
+        mSurfaceSetID(INVALID_SET_ID) {
+    readFromParcel(&parcel);
+}
+
+status_t OutputConfiguration::readFromParcel(const Parcel* parcel) {
+    status_t err = OK;
     int rotation = 0;
-    if ((err = parcel.readInt32(&rotation)) != OK) {
+
+    if (parcel == nullptr) return BAD_VALUE;
+
+    if ((err = parcel->readInt32(&rotation)) != OK) {
         ALOGE("%s: Failed to read rotation from parcel", __FUNCTION__);
-        mGbp = NULL;
-        mRotation = INVALID_ROTATION;
-        return;
+        return err;
     }
 
-    String16 name = readMaybeEmptyString16(parcel);
-    const sp<IGraphicBufferProducer>& gbp =
-            interface_cast<IGraphicBufferProducer>(parcel.readStrongBinder());
-    mGbp = gbp;
-    mRotation = rotation;
+    int setID = INVALID_SET_ID;
+    if ((err = parcel->readInt32(&setID)) != OK) {
+        ALOGE("%s: Failed to read surface set ID from parcel", __FUNCTION__);
+        return err;
+    }
 
-    ALOGV("%s: OutputConfiguration: bp = %p, name = %s", __FUNCTION__,
-          gbp.get(), String8(name).string());
+    view::Surface surfaceShim;
+    if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
+        ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
+        return err;
+    }
+
+    mGbp = surfaceShim.graphicBufferProducer;
+    mRotation = rotation;
+    mSurfaceSetID = setID;
+
+    ALOGV("%s: OutputConfiguration: bp = %p, name = %s, rotation = %d, setId = %d", __FUNCTION__,
+            mGbp.get(), String8(surfaceShim.name).string(), mRotation, mSurfaceSetID);
+
+    return err;
 }
 
-OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation) {
+OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
+        int surfaceSetID) {
     mGbp = gbp;
     mRotation = rotation;
+    mSurfaceSetID = surfaceSetID;
 }
 
-status_t OutputConfiguration::writeToParcel(Parcel& parcel) const {
+status_t OutputConfiguration::writeToParcel(Parcel* parcel) const {
 
-    parcel.writeInt32(mRotation);
-    parcel.writeString16(String16("unknown_name")); // name of surface
-    sp<IBinder> b(IInterface::asBinder(mGbp));
-    parcel.writeStrongBinder(b);
+    if (parcel == nullptr) return BAD_VALUE;
+    status_t err = OK;
+
+    err = parcel->writeInt32(mRotation);
+    if (err != OK) return err;
+
+    err = parcel->writeInt32(mSurfaceSetID);
+    if (err != OK) return err;
+
+    view::Surface surfaceShim;
+    surfaceShim.name = String16("unknown_name"); // name of surface
+    surfaceShim.graphicBufferProducer = mGbp;
+
+    err = surfaceShim.writeToParcel(parcel);
+    if (err != OK) return err;
 
     return OK;
 }
 
 }; // namespace android
-
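The rewritten parcelable is now symmetric: readFromParcel() consumes exactly what writeToParcel() produces (rotation, surface set ID, then the view::Surface shim). A round-trip sketch, with gbp assumed to be a valid IGraphicBufferProducer:

    Parcel parcel;
    OutputConfiguration out(gbp, /*rotation*/ 0, /*surfaceSetID*/ 0);
    status_t err = out.writeToParcel(&parcel);   // rotation, set ID, surface shim
    if (err == OK) {
        parcel.setDataPosition(0);               // rewind for reading
        OutputConfiguration in;                  // holds INVALID_* until a read succeeds
        err = in.readFromParcel(&parcel);        // mirrors the write order exactly
    }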
diff --git a/camera/camera2/SubmitInfo.cpp b/camera/camera2/SubmitInfo.cpp
new file mode 100644
index 0000000..d739c79
--- /dev/null
+++ b/camera/camera2/SubmitInfo.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "camera/camera2/SubmitInfo.h"
+
+namespace android {
+namespace hardware {
+namespace camera2 {
+namespace utils {
+
+status_t SubmitInfo::writeToParcel(Parcel *parcel) const {
+    status_t res;
+    if (parcel == nullptr) return BAD_VALUE;
+
+    res = parcel->writeInt32(mRequestId);
+    if (res != OK) return res;
+
+    res = parcel->writeInt64(mLastFrameNumber);
+    return res;
+}
+
+status_t SubmitInfo::readFromParcel(const Parcel *parcel) {
+    status_t res;
+    if (parcel == nullptr) return BAD_VALUE;
+
+    res = parcel->readInt32(&mRequestId);
+    if (res != OK) return res;
+
+    res = parcel->readInt64(&mLastFrameNumber);
+    return res;
+}
+
+} // namespace utils
+} // namespace camera2
+} // namespace hardware
+} // namespace android
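SubmitInfo bundles the request ID and last frame number that the removed SUBMIT_REQUEST/CANCEL_REQUEST transactions marshalled as two loose fields. A usage sketch, assuming the generated ICameraDeviceUser methods take a SubmitInfo out-parameter:

    hardware::camera2::utils::SubmitInfo info;
    binder::Status res = remote->submitRequestList(requests, repeating, &info);
    if (res.isOk()) {
        ALOGV("request %d accepted, last frame %" PRId64,
                info.mRequestId, info.mLastFrameNumber);
    }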
diff --git a/camera/cameraserver/Android.mk b/camera/cameraserver/Android.mk
new file mode 100644
index 0000000..7e36c5e
--- /dev/null
+++ b/camera/cameraserver/Android.mk
@@ -0,0 +1,36 @@
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+	main_cameraserver.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+	libcameraservice \
+	libcutils \
+	libutils \
+	libbinder \
+	libcamera_client
+
+LOCAL_MODULE:= cameraserver
+LOCAL_32_BIT_ONLY := true
+
+LOCAL_CFLAGS += -Wall -Wextra -Werror -Wno-unused-parameter
+
+LOCAL_INIT_RC := cameraserver.rc
+
+include $(BUILD_EXECUTABLE)
diff --git a/camera/cameraserver/cameraserver.rc b/camera/cameraserver/cameraserver.rc
new file mode 100644
index 0000000..16d9da8
--- /dev/null
+++ b/camera/cameraserver/cameraserver.rc
@@ -0,0 +1,6 @@
+service cameraserver /system/bin/cameraserver
+    class main
+    user cameraserver
+    group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks
diff --git a/camera/cameraserver/main_cameraserver.cpp b/camera/cameraserver/main_cameraserver.cpp
new file mode 100644
index 0000000..f4be468
--- /dev/null
+++ b/camera/cameraserver/main_cameraserver.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "cameraserver"
+//#define LOG_NDEBUG 0
+
+// from LOCAL_C_INCLUDES
+#include "CameraService.h"
+
+using namespace android;
+
+int main(int argc __unused, char** argv __unused)
+{
+    signal(SIGPIPE, SIG_IGN);
+
+    sp<ProcessState> proc(ProcessState::self());
+    sp<IServiceManager> sm = defaultServiceManager();
+    ALOGI("ServiceManager: %p", sm.get());
+    CameraService::instantiate();
+    ProcessState::self()->startThreadPool();
+    IPCThreadState::self()->joinThreadPool();
+}
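CameraService::instantiate() comes from the BinderService<T> template; conceptually it publishes the service with the service manager, roughly equivalent to the following (a sketch of the assumed BinderService behavior, not code in this change):

    defaultServiceManager()->addService(
            String16(CameraService::getServiceName()), new CameraService());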
diff --git a/camera/ndk/Android.mk b/camera/ndk/Android.mk
new file mode 100644
index 0000000..40dbeef
--- /dev/null
+++ b/camera/ndk/Android.mk
@@ -0,0 +1,58 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+ifneq ($(TARGET_BUILD_PDK), true)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:=                  \
+    NdkCameraManager.cpp           \
+    NdkCameraMetadata.cpp          \
+    NdkCameraDevice.cpp            \
+    NdkCaptureRequest.cpp          \
+    NdkCameraCaptureSession.cpp    \
+    impl/ACameraManager.cpp        \
+    impl/ACameraMetadata.cpp       \
+    impl/ACameraDevice.cpp         \
+    impl/ACameraCaptureSession.cpp
+
+LOCAL_MODULE:= libcamera2ndk
+
+LOCAL_C_INCLUDES := \
+    frameworks/av/include/camera/ndk \
+    frameworks/av/include/ndk
+
+LOCAL_CFLAGS += -fvisibility=hidden -D EXPORT='__attribute__ ((visibility ("default")))'
+LOCAL_CFLAGS += -Wall -Wextra -Werror
+
+LOCAL_SHARED_LIBRARIES := \
+    libbinder \
+    liblog \
+    libgui \
+    libutils \
+    libandroid_runtime \
+    libcamera_client \
+    libstagefright_foundation \
+    libcutils \
+    libcamera_metadata
+
+LOCAL_CLANG := true
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif
diff --git a/camera/ndk/NdkCameraCaptureSession.cpp b/camera/ndk/NdkCameraCaptureSession.cpp
new file mode 100644
index 0000000..d6eff24
--- /dev/null
+++ b/camera/ndk/NdkCameraCaptureSession.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkCameraCaptureSession"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#include <utils/Log.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+#include <utils/Trace.h>
+
+#include "NdkCameraDevice.h"
+#include <NdkCaptureRequest.h>
+#include <NdkCameraCaptureSession.h>
+#include "impl/ACameraCaptureSession.h"
+
+using namespace android;
+
+EXPORT
+void ACameraCaptureSession_close(ACameraCaptureSession* session) {
+    ATRACE_CALL();
+    if (session != nullptr) {
+        session->closeByApp();
+    }
+    return;
+}
+
+EXPORT
+camera_status_t ACameraCaptureSession_getDevice(
+        ACameraCaptureSession* session, ACameraDevice **device) {
+    ATRACE_CALL();
+    if (session == nullptr || device == nullptr) {
+        ALOGE("%s: Error: invalid input: session %p, device %p",
+                __FUNCTION__, session, device);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (session->isClosed()) {
+        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+        *device = nullptr;
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+
+    *device = session->getDevice();
+    if (*device == nullptr) {
+        // Should not reach here
+        ALOGE("%s: unknown failure: device is null", __FUNCTION__);
+        return ACAMERA_ERROR_UNKNOWN;
+    }
+    return ACAMERA_OK;
+}
+
+EXPORT
+camera_status_t ACameraCaptureSession_capture(
+        ACameraCaptureSession* session, /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    ATRACE_CALL();
+    if (session == nullptr || requests == nullptr || numRequests < 1) {
+        ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
+                __FUNCTION__, session, numRequests, requests);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (session->isClosed()) {
+        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+        if (captureSequenceId != nullptr) *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+
+    return session->capture(cbs, numRequests, requests, captureSequenceId);
+}
+
+EXPORT
+camera_status_t ACameraCaptureSession_setRepeatingRequest(
+        ACameraCaptureSession* session, /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    ATRACE_CALL();
+    if (session == nullptr || requests == nullptr || numRequests < 1) {
+        ALOGE("%s: Error: invalid input: session %p, numRequest %d, requests %p",
+                __FUNCTION__, session, numRequests, requests);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (session->isClosed()) {
+        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+        if (captureSequenceId != nullptr) *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+
+    return session->setRepeatingRequest(cbs, numRequests, requests, captureSequenceId);
+}
+
+EXPORT
+camera_status_t ACameraCaptureSession_stopRepeating(ACameraCaptureSession* session) {
+    ATRACE_CALL();
+    if (session == nullptr) {
+        ALOGE("%s: Error: session is null", __FUNCTION__);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (session->isClosed()) {
+        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+    return session->stopRepeating();
+}
+
+EXPORT
+camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session) {
+    ATRACE_CALL();
+    if (session == nullptr) {
+        ALOGE("%s: Error: session is null", __FUNCTION__);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (session->isClosed()) {
+        ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+    return session->abortCaptures();
+}
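A typical client sequence over these entry points, assuming session and request were obtained from ACameraDevice_createCaptureSession and ACameraDevice_createCaptureRequest:

    ACaptureRequest* requests[1] = { request };
    int sequenceId = 0;
    camera_status_t res = ACameraCaptureSession_setRepeatingRequest(
            session, /*cbs*/ nullptr, 1, requests, &sequenceId);
    if (res == ACAMERA_OK) {
        // ... preview runs; later, tear down in order:
        ACameraCaptureSession_stopRepeating(session);
        ACameraCaptureSession_close(session);
    }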
diff --git a/camera/ndk/NdkCameraDevice.cpp b/camera/ndk/NdkCameraDevice.cpp
new file mode 100644
index 0000000..281d3e7
--- /dev/null
+++ b/camera/ndk/NdkCameraDevice.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkCameraDevice"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <NdkCameraDevice.h>
+#include "impl/ACameraCaptureSession.h"
+
+using namespace android;
+
+EXPORT
+camera_status_t ACameraDevice_close(ACameraDevice* device) {
+    ATRACE_CALL();
+    if (device == nullptr) {
+        ALOGE("%s: invalid argument! device is null", __FUNCTION__);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    delete device;
+    return ACAMERA_OK;
+}
+
+EXPORT
+const char* ACameraDevice_getId(const ACameraDevice* device) {
+    ATRACE_CALL();
+    if (device == nullptr) {
+        ALOGE("%s: invalid argument! device is null", __FUNCTION__);
+        return nullptr;
+    }
+    return device->getId();
+}
+
+EXPORT
+camera_status_t ACameraDevice_createCaptureRequest(
+        const ACameraDevice* device,
+        ACameraDevice_request_template templateId,
+        ACaptureRequest** request) {
+    ATRACE_CALL();
+    if (device == nullptr || request == nullptr) {
+        ALOGE("%s: invalid argument! device %p request %p",
+                __FUNCTION__, device, request);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    switch (templateId) {
+        case TEMPLATE_PREVIEW:
+        case TEMPLATE_STILL_CAPTURE:
+        case TEMPLATE_RECORD:
+        case TEMPLATE_VIDEO_SNAPSHOT:
+        case TEMPLATE_ZERO_SHUTTER_LAG:
+        case TEMPLATE_MANUAL:
+            break;
+        default:
+            ALOGE("%s: unknown template ID %d", __FUNCTION__, templateId);
+            return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return device->createCaptureRequest(templateId, request);
+}
+
+EXPORT
+camera_status_t ACaptureSessionOutputContainer_create(
+        /*out*/ACaptureSessionOutputContainer** out) {
+    ATRACE_CALL();
+    if (out == nullptr) {
+        ALOGE("%s: Error: out null", __FUNCTION__);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    *out = new ACaptureSessionOutputContainer();
+    return ACAMERA_OK;
+}
+
+EXPORT
+void ACaptureSessionOutputContainer_free(ACaptureSessionOutputContainer* container) {
+    ATRACE_CALL();
+    if (container != nullptr) {
+        delete container;
+    }
+    return;
+}
+
+EXPORT
+camera_status_t ACaptureSessionOutput_create(
+        ANativeWindow* window, /*out*/ACaptureSessionOutput** out) {
+    ATRACE_CALL();
+    if (window == nullptr || out == nullptr) {
+        ALOGE("%s: Error: bad argument. window %p, out %p",
+                __FUNCTION__, window, out);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    *out = new ACaptureSessionOutput(window);
+    return ACAMERA_OK;
+}
+
+EXPORT
+void ACaptureSessionOutput_free(ACaptureSessionOutput* output) {
+    ATRACE_CALL();
+    if (output != nullptr) {
+        delete output;
+    }
+    return;
+}
+
+EXPORT
+camera_status_t ACaptureSessionOutputContainer_add(
+        ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output) {
+    ATRACE_CALL();
+    if (container == nullptr || output == nullptr) {
+        ALOGE("%s: Error: invalid input: container %p, output %p",
+                __FUNCTION__, container, output);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    auto pair = container->mOutputs.insert(*output);
+    if (!pair.second) {
+        ALOGW("%s: output %p already exists!", __FUNCTION__, output);
+    }
+    return ACAMERA_OK;
+}
+
+EXPORT
+camera_status_t ACaptureSessionOutputContainer_remove(
+        ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output) {
+    ATRACE_CALL();
+    if (container == nullptr || output == nullptr) {
+        ALOGE("%s: Error: invalid input: container %p, output %p",
+                __FUNCTION__, container, output);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    container->mOutputs.erase(*output);
+    return ACAMERA_OK;
+}
+
+EXPORT
+camera_status_t ACameraDevice_createCaptureSession(
+        ACameraDevice* device,
+        const ACaptureSessionOutputContainer*       outputs,
+        const ACameraCaptureSession_stateCallbacks* callbacks,
+        /*out*/ACameraCaptureSession** session) {
+    ATRACE_CALL();
+    if (device == nullptr || outputs == nullptr || callbacks == nullptr || session == nullptr) {
+        ALOGE("%s: Error: invalid input: device %p, outputs %p, callbacks %p, session %p",
+                __FUNCTION__, device, outputs, callbacks, session);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return device->createCaptureSession(outputs, callbacks, session);
+}
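Putting the container API together, a sketch of session creation ('window' and 'sessionCallbacks' are supplied by the app):

    ACaptureSessionOutputContainer* outputs = nullptr;
    ACaptureSessionOutput* output = nullptr;
    ACameraCaptureSession* session = nullptr;
    ACaptureSessionOutputContainer_create(&outputs);
    ACaptureSessionOutput_create(window, &output);
    ACaptureSessionOutputContainer_add(outputs, output);
    camera_status_t res = ACameraDevice_createCaptureSession(
            device, outputs, &sessionCallbacks, &session);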
diff --git a/camera/ndk/NdkCameraManager.cpp b/camera/ndk/NdkCameraManager.cpp
new file mode 100644
index 0000000..ff15263
--- /dev/null
+++ b/camera/ndk/NdkCameraManager.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkCameraManager"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <NdkCameraManager.h>
+#include "impl/ACameraManager.h"
+
+using namespace android;
+
+EXPORT
+ACameraManager* ACameraManager_create() {
+    ATRACE_CALL();
+    return new ACameraManager();
+}
+
+EXPORT
+void ACameraManager_delete(ACameraManager* manager) {
+    ATRACE_CALL();
+    if (manager != nullptr) {
+        delete manager;
+    }
+}
+
+EXPORT
+camera_status_t ACameraManager_getCameraIdList(
+        ACameraManager* manager, ACameraIdList** cameraIdList) {
+    ATRACE_CALL();
+    if (manager == nullptr || cameraIdList == nullptr) {
+        ALOGE("%s: invalid argument! manager %p, cameraIdList %p",
+              __FUNCTION__, manager, cameraIdList);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return manager->getCameraIdList(cameraIdList);
+}
+
+EXPORT
+void ACameraManager_deleteCameraIdList(ACameraIdList* cameraIdList) {
+    ATRACE_CALL();
+    if (cameraIdList != nullptr) {
+        ACameraManager::deleteCameraIdList(cameraIdList);
+    }
+}
+
+EXPORT
+camera_status_t ACameraManager_registerAvailabilityCallback(
+        ACameraManager*, const ACameraManager_AvailabilityCallbacks *callback) {
+    ATRACE_CALL();
+    if (callback == nullptr) {
+        ALOGE("%s: invalid argument! callback is null!", __FUNCTION__);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    if (callback->onCameraAvailable == nullptr || callback->onCameraUnavailable == nullptr) {
+        ALOGE("%s: invalid argument! callback %p, "
+                "onCameraAvailable %p, onCameraUnavailable %p",
+               __FUNCTION__, callback,
+               callback->onCameraAvailable, callback->onCameraUnavailable);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    CameraManagerGlobal::getInstance().registerAvailabilityCallback(callback);
+    return ACAMERA_OK;
+}
+
+EXPORT
+camera_status_t ACameraManager_unregisterAvailabilityCallback(
+        ACameraManager*, const ACameraManager_AvailabilityCallbacks *callback) {
+    ATRACE_CALL();
+    if (callback == nullptr) {
+        ALOGE("%s: invalid argument! callback is null!", __FUNCTION__);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    if (callback->onCameraAvailable == nullptr || callback->onCameraUnavailable == nullptr) {
+        ALOGE("%s: invalid argument! callback %p, "
+                "onCameraAvailable %p, onCameraUnavailable %p",
+               __FUNCTION__, callback,
+               callback->onCameraAvailable, callback->onCameraUnavailable);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    CameraManagerGlobal::getInstance().unregisterAvailabilityCallback(callback);
+    return ACAMERA_OK;
+}
+
+EXPORT
+camera_status_t ACameraManager_getCameraCharacteristics(
+        ACameraManager* mgr, const char* cameraId, ACameraMetadata** chars){
+    ATRACE_CALL();
+    if (mgr == nullptr || cameraId == nullptr || chars == nullptr) {
+        ALOGE("%s: invalid argument! mgr %p cameraId %p chars %p",
+                __FUNCTION__, mgr, cameraId, chars);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return mgr->getCameraCharacteristics(cameraId, chars);
+}
+
+EXPORT
+camera_status_t ACameraManager_openCamera(
+        ACameraManager* mgr, const char* cameraId,
+        ACameraDevice_StateCallbacks* callback,
+        /*out*/ACameraDevice** device) {
+    ATRACE_CALL();
+    if (mgr == nullptr || cameraId == nullptr || callback == nullptr || device == nullptr) {
+        ALOGE("%s: invalid argument! mgr %p cameraId %p callback %p device %p",
+                __FUNCTION__, mgr, cameraId, callback, device);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return mgr->openCamera(cameraId, callback, device);
+}
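End to end, a sketch of enumerating and opening a camera through this manager API; the numCameras/cameraIds fields are assumed from the ACameraIdList definition in the NDK headers, and 'deviceCallbacks' is app-supplied:

    ACameraManager* mgr = ACameraManager_create();
    ACameraIdList* idList = nullptr;
    if (ACameraManager_getCameraIdList(mgr, &idList) == ACAMERA_OK &&
            idList->numCameras > 0) {
        ACameraDevice* device = nullptr;
        ACameraManager_openCamera(mgr, idList->cameraIds[0], &deviceCallbacks, &device);
        ACameraManager_deleteCameraIdList(idList);
    }
    ACameraManager_delete(mgr);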
diff --git a/camera/ndk/NdkCameraMetadata.cpp b/camera/ndk/NdkCameraMetadata.cpp
new file mode 100644
index 0000000..85fe75b
--- /dev/null
+++ b/camera/ndk/NdkCameraMetadata.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkCameraMetadata"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "NdkCameraMetadata.h"
+#include "impl/ACameraMetadata.h"
+
+using namespace android;
+
+EXPORT
+camera_status_t ACameraMetadata_getConstEntry(
+        const ACameraMetadata* acm, uint32_t tag, ACameraMetadata_const_entry* entry) {
+    ATRACE_CALL();
+    if (acm == nullptr || entry == nullptr) {
+        ALOGE("%s: invalid argument! metadata %p, tag 0x%x, entry %p",
+               __FUNCTION__, acm, tag, entry);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return acm->getConstEntry(tag, entry);
+}
+
+EXPORT
+camera_status_t ACameraMetadata_getAllTags(
+        const ACameraMetadata* acm, /*out*/int32_t* numTags, /*out*/const uint32_t** tags) {
+    ATRACE_CALL();
+    if (acm == nullptr || numTags == nullptr || tags == nullptr) {
+        ALOGE("%s: invalid argument! metadata %p, numTags %p, tags %p",
+               __FUNCTION__, acm, numTags, tags);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return acm->getTags(numTags, tags);
+}
+
+EXPORT
+ACameraMetadata* ACameraMetadata_copy(const ACameraMetadata* src) {
+    ATRACE_CALL();
+    if (src == nullptr) {
+        ALOGE("%s: src is null!", __FUNCTION__);
+        return nullptr;
+    }
+    return new ACameraMetadata(*src);
+}
+
+EXPORT
+void ACameraMetadata_free(ACameraMetadata* metadata) {
+    ATRACE_CALL();
+    if (metadata != nullptr) {
+        delete metadata;
+    }
+}
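A sketch of reading one characteristics entry through this API; the tag constant is assumed from the NDK metadata headers:

    ACameraMetadata_const_entry entry;
    camera_status_t res = ACameraMetadata_getConstEntry(
            chars, ACAMERA_SENSOR_ORIENTATION, &entry);
    if (res == ACAMERA_OK && entry.count == 1) {
        int32_t orientation = entry.data.i32[0];  // union member selected by entry.type
    }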
diff --git a/camera/ndk/NdkCaptureRequest.cpp b/camera/ndk/NdkCaptureRequest.cpp
new file mode 100644
index 0000000..77b9a33
--- /dev/null
+++ b/camera/ndk/NdkCaptureRequest.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkCaptureRequest"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "NdkCaptureRequest.h"
+#include "impl/ACameraMetadata.h"
+#include "impl/ACaptureRequest.h"
+
+EXPORT
+camera_status_t ACameraOutputTarget_create(
+        ANativeWindow* window, ACameraOutputTarget** out) {
+    ATRACE_CALL();
+    if (window == nullptr || out == nullptr) {
+        ALOGE("%s: Error: invalid input: window %p, out %p", __FUNCTION__, window, out);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    *out = new ACameraOutputTarget(window);
+    return ACAMERA_OK;
+}
+
+EXPORT
+void ACameraOutputTarget_free(ACameraOutputTarget* target) {
+    ATRACE_CALL();
+    if (target != nullptr) {
+        delete target;
+    }
+    return;
+}
+
+EXPORT
+camera_status_t ACaptureRequest_addTarget(
+        ACaptureRequest* req, const ACameraOutputTarget* target) {
+    ATRACE_CALL();
+    if (req == nullptr || req->targets == nullptr || target == nullptr) {
+        ALOGE("%s: Error: invalid input: req %p, req-targets %p, target %p",
+                __FUNCTION__, req, req->targets, target);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    auto pair = req->targets->mOutputs.insert(*target);
+    if (!pair.second) {
+        ALOGW("%s: target %p already exists!", __FUNCTION__, target);
+    }
+    return ACAMERA_OK;
+}
+
+EXPORT
+camera_status_t ACaptureRequest_removeTarget(
+        ACaptureRequest* req, const ACameraOutputTarget* target) {
+    ATRACE_CALL();
+    if (req == nullptr || req->targets == nullptr || target == nullptr) {
+        ALOGE("%s: Error: invalid input: req %p, req-targets %p, target %p",
+                __FUNCTION__, req, req->targets, target);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    req->targets->mOutputs.erase(*target);
+    return ACAMERA_OK;
+}
+
+EXPORT
+camera_status_t ACaptureRequest_getConstEntry(
+        const ACaptureRequest* req, uint32_t tag, ACameraMetadata_const_entry* entry) {
+    ATRACE_CALL();
+    if (req == nullptr || entry == nullptr) {
+        ALOGE("%s: invalid argument! req 0x%p, tag 0x%x, entry 0x%p",
+               __FUNCTION__, req, tag, entry);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return req->settings->getConstEntry(tag, entry);
+}
+
+EXPORT
+camera_status_t ACaptureRequest_getAllTags(
+        const ACaptureRequest* req, /*out*/int32_t* numTags, /*out*/const uint32_t** tags) {
+    ATRACE_CALL();
+    if (req == nullptr || numTags == nullptr || tags == nullptr) {
+        ALOGE("%s: invalid argument! request %p, numTags %p, tags %p",
+               __FUNCTION__, req, numTags, tags);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    return req->settings->getTags(numTags, tags);
+}
+
+#define SET_ENTRY(NAME,NDK_TYPE)                                                        \
+EXPORT                                                                                  \
+camera_status_t ACaptureRequest_setEntry_##NAME(                                        \
+        ACaptureRequest* req, uint32_t tag, uint32_t count, const NDK_TYPE* data) {     \
+    ATRACE_CALL();                                                                      \
+    if (req == nullptr || (count > 0 && data == nullptr)) {                             \
+        ALOGE("%s: invalid argument! req %p, tag 0x%x, count %d, data 0x%p",            \
+               __FUNCTION__, req, tag, count, data);                                    \
+        return ACAMERA_ERROR_INVALID_PARAMETER;                                         \
+    }                                                                                   \
+    return req->settings->update(tag, count, data);                                     \
+}
+
+SET_ENTRY(u8,uint8_t)
+SET_ENTRY(i32,int32_t)
+SET_ENTRY(float,float)
+SET_ENTRY(double,double)
+SET_ENTRY(i64,int64_t)
+SET_ENTRY(rational,ACameraMetadata_rational)
+
+#undef SET_ENTRY
+
+EXPORT
+void ACaptureRequest_free(ACaptureRequest* request) {
+    ATRACE_CALL();
+    if (request == nullptr) {
+        return;
+    }
+    delete request->settings;
+    delete request->targets;
+    delete request;
+    return;
+}
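The SET_ENTRY macro above stamps out one typed setter per metadata type (ACaptureRequest_setEntry_u8, _i32, and so on). A sketch of overriding a single request key; the tag constant is assumed from the NDK headers:

    int32_t sensitivity = 800;
    camera_status_t res = ACaptureRequest_setEntry_i32(
            request, ACAMERA_SENSOR_SENSITIVITY, /*count*/ 1, &sensitivity);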
diff --git a/camera/ndk/impl/ACameraCaptureSession.cpp b/camera/ndk/impl/ACameraCaptureSession.cpp
new file mode 100644
index 0000000..b9c159d
--- /dev/null
+++ b/camera/ndk/impl/ACameraCaptureSession.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ACameraCaptureSession"
+
+#include "ACameraCaptureSession.h"
+
+using namespace android;
+
+ACameraCaptureSession::~ACameraCaptureSession() {
+    ALOGV("~ACameraCaptureSession: %p notify device end of life", this);
+    sp<CameraDevice> dev = getDeviceSp();
+    if (dev != nullptr && !dev->isClosed()) {
+        dev->lockDeviceForSessionOps();
+        {
+            Mutex::Autolock _l(mSessionLock);
+            dev->notifySessionEndOfLifeLocked(this);
+        }
+        dev->unlockDevice();
+    }
+    // Fire onClosed callback
+    (*mUserSessionCallback.onClosed)(mUserSessionCallback.context, this);
+    ALOGV("~ACameraCaptureSession: %p is deleted", this);
+}
+
+void
+ACameraCaptureSession::closeByApp() {
+    {
+        Mutex::Autolock _l(mSessionLock);
+        if (mClosedByApp) {
+            // Do not close twice
+            return;
+        }
+        mClosedByApp = true;
+    }
+
+    sp<CameraDevice> dev = getDeviceSp();
+    if (dev != nullptr) {
+        dev->lockDeviceForSessionOps();
+    }
+
+    {
+        Mutex::Autolock _l(mSessionLock);
+
+        if (!mIsClosed && dev != nullptr) {
+            camera_status_t ret = dev->stopRepeatingLocked();
+            if (ret != ACAMERA_OK) {
+                ALOGE("Stop repeating request failed while closing session %p", this);
+            }
+        }
+        mIsClosed = true;
+    }
+
+    if (dev != nullptr) {
+        dev->unlockDevice();
+    }
+    this->decStrong((void*) ACameraDevice_createCaptureSession);
+}
+
+camera_status_t
+ACameraCaptureSession::stopRepeating() {
+    sp<CameraDevice> dev = getDeviceSp();
+    if (dev == nullptr) {
+        ALOGE("Error: Device associated with session %p has been closed!", this);
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+
+    camera_status_t ret;
+    dev->lockDeviceForSessionOps();
+    {
+        Mutex::Autolock _l(mSessionLock);
+        ret = dev->stopRepeatingLocked();
+    }
+    dev->unlockDevice();
+    return ret;
+}
+
+camera_status_t
+ACameraCaptureSession::abortCaptures() {
+    sp<CameraDevice> dev = getDeviceSp();
+    if (dev == nullptr) {
+        ALOGE("Error: Device associated with session %p has been closed!", this);
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+
+    camera_status_t ret;
+    dev->lockDeviceForSessionOps();
+    {
+        Mutex::Autolock _l(mSessionLock);
+        ret = dev->flushLocked(this);
+    }
+    dev->unlockDevice();
+    return ret;
+}
+
+camera_status_t
+ACameraCaptureSession::setRepeatingRequest(
+        /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    sp<CameraDevice> dev = getDeviceSp();
+    if (dev == nullptr) {
+        ALOGE("Error: Device associated with session %p has been closed!", this);
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+
+    camera_status_t ret;
+    dev->lockDeviceForSessionOps();
+    {
+        Mutex::Autolock _l(mSessionLock);
+        ret = dev->setRepeatingRequestsLocked(
+                this, cbs, numRequests, requests, captureSequenceId);
+    }
+    dev->unlockDevice();
+    return ret;
+}
+
+camera_status_t ACameraCaptureSession::capture(
+        /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    sp<CameraDevice> dev = getDeviceSp();
+    if (dev == nullptr) {
+        ALOGE("Error: Device associated with session %p has been closed!", this);
+        return ACAMERA_ERROR_SESSION_CLOSED;
+    }
+    camera_status_t ret;
+    dev->lockDeviceForSessionOps();
+    {
+        Mutex::Autolock _l(mSessionLock);
+        ret = dev->captureLocked(this, cbs, numRequests, requests, captureSequenceId);
+    }
+    dev->unlockDevice();
+    return ret;
+}
+
+ACameraDevice*
+ACameraCaptureSession::getDevice() {
+    Mutex::Autolock _l(mSessionLock);
+    sp<CameraDevice> dev = getDeviceSp();
+    if (dev == nullptr) {
+        ALOGE("Error: Device associated with session %p has been closed!", this);
+        return nullptr;
+    }
+    return dev->getWrapper();
+}
+
+void
+ACameraCaptureSession::closeByDevice() {
+    Mutex::Autolock _l(mSessionLock);
+    mIsClosed = true;
+}
+
+sp<CameraDevice>
+ACameraCaptureSession::getDeviceSp() {
+    sp<CameraDevice> device = mDevice.promote();
+    if (device == nullptr || device->isClosed()) {
+        ALOGW("Device is closed but session %d is not notified", mId);
+        return nullptr;
+    }
+    return device;
+}
+
+
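Every public entry point above follows the same lock-ordering discipline: the device lock is taken before mSessionLock, never the reverse, so device-side callbacks cannot deadlock against app calls. Condensed:

    dev->lockDeviceForSessionOps();       // device lock first
    {
        Mutex::Autolock _l(mSessionLock); // then the session lock
        // ... call a *Locked() device method with both held ...
    }
    dev->unlockDevice();                  // release in reverse order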
diff --git a/camera/ndk/impl/ACameraCaptureSession.h b/camera/ndk/impl/ACameraCaptureSession.h
new file mode 100644
index 0000000..58428e6
--- /dev/null
+++ b/camera/ndk/impl/ACameraCaptureSession.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _ACAMERA_CAPTURE_SESSION_H
+#define _ACAMERA_CAPTURE_SESSION_H
+
+#include <set>
+#include <hardware/camera3.h>
+#include <NdkCameraDevice.h>
+#include "ACameraDevice.h"
+
+using namespace android;
+
+struct ACaptureSessionOutput {
+    ACaptureSessionOutput(ANativeWindow* window) : mWindow(window) {}
+
+    bool operator == (const ACaptureSessionOutput& other) const {
+        return mWindow == other.mWindow;
+    }
+    bool operator != (const ACaptureSessionOutput& other) const {
+        return mWindow != other.mWindow;
+    }
+    bool operator < (const ACaptureSessionOutput& other) const {
+        return mWindow < other.mWindow;
+    }
+    bool operator > (const ACaptureSessionOutput& other) const {
+        return mWindow > other.mWindow;
+    }
+
+    ANativeWindow* mWindow;
+    int            mRotation = CAMERA3_STREAM_ROTATION_0;
+};
+
+struct ACaptureSessionOutputContainer {
+    std::set<ACaptureSessionOutput> mOutputs;
+};
+
+/**
+ * ACameraCaptureSession opaque struct definition
+ * Kept outside the android namespace because it is an NDK struct
+ */
+struct ACameraCaptureSession : public RefBase {
+  public:
+    ACameraCaptureSession(
+            int id,
+            const ACaptureSessionOutputContainer* outputs,
+            const ACameraCaptureSession_stateCallbacks* cb,
+            CameraDevice* device) :
+            mId(id), mOutput(*outputs), mUserSessionCallback(*cb),
+            mDevice(device) {}
+
+    // This can be called when the app calls close() or after an app callback finishes.
+    // Make sure the caller does not hold device or session lock!
+    ~ACameraCaptureSession();
+
+    // No API except ACameraCaptureSession_close will work once the device is closed.
+    // A session will enter closed state when one of the following happens:
+    //     1. Explicitly closed by app
+    //     2. Replaced by a newer session
+    //     3. Device is closed
+    bool isClosed() { Mutex::Autolock _l(mSessionLock); return mIsClosed; }
+
+    // Close the session and mark that the app no longer needs it.
+    void closeByApp();
+
+    camera_status_t stopRepeating();
+
+    camera_status_t abortCaptures();
+
+    camera_status_t setRepeatingRequest(
+            /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+            int numRequests, ACaptureRequest** requests,
+            /*optional*/int* captureSequenceId);
+
+    camera_status_t capture(
+            /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+            int numRequests, ACaptureRequest** requests,
+            /*optional*/int* captureSequenceId);
+
+    ACameraDevice* getDevice();
+
+  private:
+    friend class CameraDevice;
+
+    // Close the session because the app closed the camera device, the device
+    // received ERROR_DISCONNECTED, or a new session is replacing this one.
+    void closeByDevice();
+
+    sp<CameraDevice> getDeviceSp();
+
+    const int mId;
+    const ACaptureSessionOutputContainer mOutput;
+    const ACameraCaptureSession_stateCallbacks mUserSessionCallback;
+    const wp<CameraDevice> mDevice;
+    bool  mIsClosed = false;
+    bool  mClosedByApp = false;
+    Mutex mSessionLock;
+};
+
+#endif // _ACAMERA_CAPTURE_SESSION_H
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
new file mode 100644
index 0000000..7d78e2b
--- /dev/null
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -0,0 +1,1369 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ACameraDevice"
+
+#include <vector>
+#include <inttypes.h>
+#include <android/hardware/ICameraService.h>
+#include <camera2/SubmitInfo.h>
+#include <gui/Surface.h>
+#include "ACameraDevice.h"
+#include "ACameraMetadata.h"
+#include "ACaptureRequest.h"
+#include "ACameraCaptureSession.h"
+
+using namespace android;
+
+namespace android {
+// Static member definitions
+const char* CameraDevice::kContextKey        = "Context";
+const char* CameraDevice::kDeviceKey         = "Device";
+const char* CameraDevice::kErrorCodeKey      = "ErrorCode";
+const char* CameraDevice::kCallbackFpKey     = "Callback";
+const char* CameraDevice::kSessionSpKey      = "SessionSp";
+const char* CameraDevice::kCaptureRequestKey = "CaptureRequest";
+const char* CameraDevice::kTimeStampKey      = "TimeStamp";
+const char* CameraDevice::kCaptureResultKey  = "CaptureResult";
+const char* CameraDevice::kCaptureFailureKey = "CaptureFailure";
+const char* CameraDevice::kSequenceIdKey     = "SequenceId";
+const char* CameraDevice::kFrameNumberKey    = "FrameNumber";
+const char* CameraDevice::kAnwKey            = "Anw";
+
+/**
+ * CameraDevice Implementation
+ */
+CameraDevice::CameraDevice(
+        const char* id,
+        ACameraDevice_StateCallbacks* cb,
+        std::unique_ptr<ACameraMetadata> chars,
+        ACameraDevice* wrapper) :
+        mCameraId(id),
+        mAppCallbacks(*cb),
+        mChars(std::move(chars)),
+        mServiceCallback(new ServiceCallback(this)),
+        mWrapper(wrapper),
+        mInError(false),
+        mError(ACAMERA_OK),
+        mIdle(true) {
+    mClosing = false;
+    // Set up looper thread to perform device callbacks to the app
+    mCbLooper = new ALooper;
+    mCbLooper->setName("C2N-dev-looper");
+    status_t err = mCbLooper->start(
+            /*runOnCallingThread*/false,
+            /*canCallJava*/       true,
+            PRIORITY_DEFAULT);
+    if (err != OK) {
+        ALOGE("%s: Unable to start camera device callback looper: %s (%d)",
+                __FUNCTION__, strerror(-err), err);
+        setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
+    }
+    mHandler = new CallbackHandler();
+    mCbLooper->registerHandler(mHandler);
+
+    const CameraMetadata& metadata = mChars->getInternalData();
+    camera_metadata_ro_entry entry = metadata.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+    if (entry.count != 1) {
+        ALOGW("%s: bad count %zu for partial result count", __FUNCTION__, entry.count);
+        mPartialResultCount = 1;
+    } else {
+        mPartialResultCount = entry.data.i32[0];
+    }
+
+    entry = metadata.find(ANDROID_LENS_INFO_SHADING_MAP_SIZE);
+    if (entry.count != 2) {
+        ALOGW("%s: bad count %zu for shading map size", __FUNCTION__, entry.count);
+        mShadingMapSize[0] = 0;
+        mShadingMapSize[1] = 0;
+    } else {
+        mShadingMapSize[0] = entry.data.i32[0];
+        mShadingMapSize[1] = entry.data.i32[1];
+    }
+}
+
+// Device close implementation
+CameraDevice::~CameraDevice() {
+    Mutex::Autolock _l(mDeviceLock);
+    if (!isClosed()) {
+        disconnectLocked();
+    }
+    if (mCbLooper != nullptr) {
+        mCbLooper->unregisterHandler(mHandler->id());
+        mCbLooper->stop();
+    }
+    mCbLooper.clear();
+    mHandler.clear();
+}
+
+// TODO: cache created requests?
+camera_status_t
+CameraDevice::createCaptureRequest(
+        ACameraDevice_request_template templateId,
+        ACaptureRequest** request) const {
+    Mutex::Autolock _l(mDeviceLock);
+    camera_status_t ret = checkCameraClosedOrErrorLocked();
+    if (ret != ACAMERA_OK) {
+        return ret;
+    }
+    if (mRemote == nullptr) {
+        return ACAMERA_ERROR_CAMERA_DISCONNECTED;
+    }
+    CameraMetadata rawRequest;
+    binder::Status remoteRet = mRemote->createDefaultRequest(templateId, &rawRequest);
+    if (remoteRet.serviceSpecificErrorCode() ==
+            hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT) {
+        ALOGW("Create capture request failed! template %d is not supported on this device",
+            templateId);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    } else if (!remoteRet.isOk()) {
+        ALOGE("Create capture request failed: %s", remoteRet.toString8().string());
+        return ACAMERA_ERROR_UNKNOWN;
+    }
+    ACaptureRequest* outReq = new ACaptureRequest();
+    outReq->settings = new ACameraMetadata(rawRequest.release(), ACameraMetadata::ACM_REQUEST);
+    outReq->targets  = new ACameraOutputTargets();
+    *request = outReq;
+    return ACAMERA_OK;
+}
+
+camera_status_t
+CameraDevice::createCaptureSession(
+        const ACaptureSessionOutputContainer*       outputs,
+        const ACameraCaptureSession_stateCallbacks* callbacks,
+        /*out*/ACameraCaptureSession** session) {
+    Mutex::Autolock _l(mDeviceLock);
+    camera_status_t ret = checkCameraClosedOrErrorLocked();
+    if (ret != ACAMERA_OK) {
+        return ret;
+    }
+
+    if (mCurrentSession != nullptr) {
+        mCurrentSession->closeByDevice();
+        stopRepeatingLocked();
+    }
+
+    // Create new session
+    ret = configureStreamsLocked(outputs);
+    if (ret != ACAMERA_OK) {
+        ALOGE("Fail to create new session. cannot configure streams");
+        return ret;
+    }
+
+    ACameraCaptureSession* newSession = new ACameraCaptureSession(
+            mNextSessionId++, outputs, callbacks, this);
+
+    // set new session as current session
+    newSession->incStrong((void *) ACameraDevice_createCaptureSession);
+    mCurrentSession = newSession;
+    mFlushing = false;
+    *session = newSession;
+    return ACAMERA_OK;
+}
+
+camera_status_t
+CameraDevice::captureLocked(
+        sp<ACameraCaptureSession> session,
+        /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    return submitRequestsLocked(
+            session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/false);
+}
+
+camera_status_t
+CameraDevice::setRepeatingRequestsLocked(
+        sp<ACameraCaptureSession> session,
+        /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId) {
+    return submitRequestsLocked(
+            session, cbs, numRequests, requests, captureSequenceId, /*isRepeating*/true);
+}
+
+camera_status_t
+CameraDevice::submitRequestsLocked(
+        sp<ACameraCaptureSession> session,
+        /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId,
+        bool isRepeating) {
+    camera_status_t ret = checkCameraClosedOrErrorLocked();
+    if (ret != ACAMERA_OK) {
+        ALOGE("Camera %s submit capture request failed! ret %d", getId(), ret);
+        return ret;
+    }
+
+    // Form two vectors of capture requests: one to submit to the service, one for internal tracking
+    std::vector<hardware::camera2::CaptureRequest> requestList;
+    Vector<sp<CaptureRequest> > requestsV;
+    requestsV.setCapacity(numRequests);
+    for (int i = 0; i < numRequests; i++) {
+        sp<CaptureRequest> req;
+        ret = allocateCaptureRequest(requests[i], req);
+        if (ret != ACAMERA_OK) {
+            ALOGE("Convert capture request to internal format failure! ret %d", ret);
+            return ret;
+        }
+        if (req->mSurfaceList.empty()) {
+            ALOGE("Capture request without output target cannot be submitted!");
+            return ACAMERA_ERROR_INVALID_PARAMETER;
+        }
+        requestList.push_back(*(req.get()));
+        requestsV.push_back(req);
+    }
+
+    if (isRepeating) {
+        ret = stopRepeatingLocked();
+        if (ret != ACAMERA_OK) {
+            ALOGE("Camera %s stop repeating failed! ret %d", getId(), ret);
+            return ret;
+        }
+    }
+
+    binder::Status remoteRet;
+    hardware::camera2::utils::SubmitInfo info;
+    remoteRet = mRemote->submitRequestList(requestList, isRepeating, &info);
+    if (!remoteRet.isOk()) {
+        ALOGE("Camera %s submit request list failed: %s", getId(), remoteRet.toString8().string());
+        return ACAMERA_ERROR_UNKNOWN;
+    }
+    int sequenceId = info.mRequestId;
+    int64_t lastFrameNumber = info.mLastFrameNumber;
+    if (sequenceId < 0) {
+        ALOGE("Camera %s submit request remote failure: ret %d", getId(), sequenceId);
+        return ACAMERA_ERROR_UNKNOWN;
+    }
+
+    CallbackHolder cbHolder(session, requestsV, isRepeating, cbs);
+    mSequenceCallbackMap.insert(std::make_pair(sequenceId, cbHolder));
+
+    if (isRepeating) {
+        // stopRepeating above should have cleaned up the repeating sequence id
+        if (mRepeatingSequenceId != REQUEST_ID_NONE) {
+            setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
+            return ACAMERA_ERROR_CAMERA_DEVICE;
+        }
+        mRepeatingSequenceId = sequenceId;
+    } else {
+        mSequenceLastFrameNumberMap.insert(std::make_pair(sequenceId, lastFrameNumber));
+    }
+
+    if (mIdle) {
+        sp<AMessage> msg = new AMessage(kWhatSessionStateCb, mHandler);
+        msg->setPointer(kContextKey, session->mUserSessionCallback.context);
+        msg->setObject(kSessionSpKey, session);
+        msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onActive);
+        msg->post();
+    }
+    mIdle = false;
+    mBusySession = session;
+
+    if (captureSequenceId) {
+        *captureSequenceId = sequenceId;
+    }
+    return ACAMERA_OK;
+}
+
+camera_status_t
+CameraDevice::allocateCaptureRequest(
+        const ACaptureRequest* request, /*out*/sp<CaptureRequest>& outReq) {
+    camera_status_t ret;
+    sp<CaptureRequest> req(new CaptureRequest());
+    req->mMetadata = request->settings->getInternalData();
+    req->mIsReprocess = false; // NDK does not support reprocessing yet
+
+    for (auto outputTarget : request->targets->mOutputs) {
+        ANativeWindow* anw = outputTarget.mWindow;
+        sp<Surface> surface;
+        ret = getSurfaceFromANativeWindow(anw, surface);
+        if (ret != ACAMERA_OK) {
+            ALOGE("Bad output target in capture request! ret %d", ret);
+            return ret;
+        }
+        req->mSurfaceList.push_back(surface);
+    }
+    outReq = req;
+    return ACAMERA_OK;
+}
+
+ACaptureRequest*
+CameraDevice::allocateACaptureRequest(sp<CaptureRequest>& req) {
+    ACaptureRequest* pRequest = new ACaptureRequest();
+    CameraMetadata clone = req->mMetadata;
+    pRequest->settings = new ACameraMetadata(clone.release(), ACameraMetadata::ACM_REQUEST);
+    pRequest->targets  = new ACameraOutputTargets();
+    for (size_t i = 0; i < req->mSurfaceList.size(); i++) {
+        ANativeWindow* anw = static_cast<ANativeWindow*>(req->mSurfaceList[i].get());
+        ACameraOutputTarget outputTarget(anw);
+        pRequest->targets->mOutputs.insert(outputTarget);
+    }
+    return pRequest;
+}
+
+void
+CameraDevice::freeACaptureRequest(ACaptureRequest* req) {
+    if (req == nullptr) {
+        return;
+    }
+    delete req->settings;
+    delete req->targets;
+    delete req;
+}
+
+void
+CameraDevice::notifySessionEndOfLifeLocked(ACameraCaptureSession* session) {
+    if (isClosed()) {
+        // Device is already closing; do nothing
+        return;
+    }
+
+    if (session != mCurrentSession) {
+        // Session has been replaced by another session, or the device is closed
+        return;
+    }
+    mCurrentSession = nullptr;
+
+    // Should not happen
+    if (!session->mIsClosed) {
+        ALOGE("Error: unclosed session %p reaches end of life!", session);
+        setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
+        return;
+    }
+
+    // No new session, unconfigure now
+    camera_status_t ret = configureStreamsLocked(nullptr);
+    if (ret != ACAMERA_OK) {
+        ALOGE("Unconfigure stream failed. Device might still be configured! ret %d", ret);
+    }
+}
+
+void
+CameraDevice::disconnectLocked() {
+    if (mClosing.exchange(true)) {
+        // Already closing, just return
+        ALOGW("Camera device %s is already closing.", getId());
+        return;
+    }
+
+    if (mRemote != nullptr) {
+        mRemote->disconnect();
+    }
+    mRemote = nullptr;
+
+    if (mCurrentSession != nullptr) {
+        mCurrentSession->closeByDevice();
+        mCurrentSession = nullptr;
+    }
+}
+
+camera_status_t
+CameraDevice::stopRepeatingLocked() {
+    camera_status_t ret = checkCameraClosedOrErrorLocked();
+    if (ret != ACAMERA_OK) {
+        ALOGE("Camera %s stop repeating failed! ret %d", getId(), ret);
+        return ret;
+    }
+    if (mRepeatingSequenceId != REQUEST_ID_NONE) {
+        int repeatingSequenceId = mRepeatingSequenceId;
+        mRepeatingSequenceId = REQUEST_ID_NONE;
+
+        int64_t lastFrameNumber;
+        binder::Status remoteRet = mRemote->cancelRequest(repeatingSequenceId, &lastFrameNumber);
+        if (remoteRet.serviceSpecificErrorCode() ==
+                hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT) {
+            ALOGV("Repeating request is already stopped.");
+            return ACAMERA_OK;
+        } else if (!remoteRet.isOk()) {
+            ALOGE("Stop repeating request fails in remote: %s", remoteRet.toString8().string());
+            return ACAMERA_ERROR_UNKNOWN;
+        }
+        checkRepeatingSequenceCompleteLocked(repeatingSequenceId, lastFrameNumber);
+    }
+    return ACAMERA_OK;
+}
+
+camera_status_t
+CameraDevice::flushLocked(ACameraCaptureSession* session) {
+    camera_status_t ret = checkCameraClosedOrErrorLocked();
+    if (ret != ACAMERA_OK) {
+        ALOGE("Camera %s abort captures failed! ret %d", getId(), ret);
+        return ret;
+    }
+
+    // This should never happen because creating a new session closes the
+    // previous one, which then rejects any API call from that session.
+    // Still, check here in case something unexpected happens.
+    if (session != mCurrentSession) {
+        ALOGE("Camera %s session %p is not current active session!", getId(), session);
+        return ACAMERA_ERROR_INVALID_OPERATION;
+    }
+
+    if (mFlushing) {
+        ALOGW("Camera %s is already aborting captures", getId());
+        return ACAMERA_OK;
+    }
+
+    mFlushing = true;
+    // Send onActive callback to guarantee there is always an active -> ready transition
+    sp<AMessage> msg = new AMessage(kWhatSessionStateCb, mHandler);
+    msg->setPointer(kContextKey, session->mUserSessionCallback.context);
+    msg->setObject(kSessionSpKey, session);
+    msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onActive);
+    msg->post();
+
+    // If device is already idling, send callback and exit early
+    if (mIdle) {
+        sp<AMessage> msg = new AMessage(kWhatSessionStateCb, mHandler);
+        msg->setPointer(kContextKey, session->mUserSessionCallback.context);
+        msg->setObject(kSessionSpKey, session);
+        msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onReady);
+        msg->post();
+        mFlushing = false;
+        return ACAMERA_OK;
+    }
+
+    int64_t lastFrameNumber;
+    binder::Status remoteRet = mRemote->flush(&lastFrameNumber);
+    if (!remoteRet.isOk()) {
+        ALOGE("Abort captures fails in remote: %s", remoteRet.toString8().string());
+        return ACAMERA_ERROR_UNKNOWN;
+    }
+    if (mRepeatingSequenceId != REQUEST_ID_NONE) {
+        checkRepeatingSequenceCompleteLocked(mRepeatingSequenceId, lastFrameNumber);
+    }
+    return ACAMERA_OK;
+}
+
+camera_status_t
+CameraDevice::waitUntilIdleLocked() {
+    camera_status_t ret = checkCameraClosedOrErrorLocked();
+    if (ret != ACAMERA_OK) {
+        ALOGE("Wait until camera %s idle failed! ret %d", getId(), ret);
+        return ret;
+    }
+
+    if (mRepeatingSequenceId != REQUEST_ID_NONE) {
+        ALOGE("Camera device %s won't go to idle when there is repeating request!", getId());
+        return ACAMERA_ERROR_INVALID_OPERATION;
+    }
+
+    binder::Status remoteRet = mRemote->waitUntilIdle();
+    if (!remoteRet.isOk()) {
+        ALOGE("Camera device %s waitUntilIdle failed: %s", getId(), remoteRet.toString8().string());
+        // TODO: define a function to convert status_t -> camera_status_t
+        return ACAMERA_ERROR_UNKNOWN;
+    }
+
+    return ACAMERA_OK;
+}
+
+camera_status_t
+CameraDevice::getIGBPfromAnw(
+        ANativeWindow* anw,
+        sp<IGraphicBufferProducer>& out) {
+    if (anw == nullptr) {
+        ALOGE("Error: output ANativeWindow is null");
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    int value;
+    int err = (*anw->query)(anw, NATIVE_WINDOW_CONCRETE_TYPE, &value);
+    if (err != OK || value != NATIVE_WINDOW_SURFACE) {
+        ALOGE("Error: ANativeWindow is not backed by Surface!");
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    const sp<Surface> surface(static_cast<Surface*>(anw));
+    out = surface->getIGraphicBufferProducer();
+    return ACAMERA_OK;
+}
+
+camera_status_t
+CameraDevice::getSurfaceFromANativeWindow(
+        ANativeWindow* anw, sp<Surface>& out) {
+    if (anw == nullptr) {
+        ALOGE("Error: output ANativeWindow is null");
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    int value;
+    int err = (*anw->query)(anw, NATIVE_WINDOW_CONCRETE_TYPE, &value);
+    if (err != OK || value != NATIVE_WINDOW_SURFACE) {
+        ALOGE("Error: ANativeWindow is not backed by Surface!");
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    sp<Surface> surface(static_cast<Surface*>(anw));
+    out = surface;
+    return ACAMERA_OK;
+}
+
+camera_status_t
+CameraDevice::configureStreamsLocked(const ACaptureSessionOutputContainer* outputs) {
+    ACaptureSessionOutputContainer emptyOutput;
+    if (outputs == nullptr) {
+        outputs = &emptyOutput;
+    }
+
+    camera_status_t ret = checkCameraClosedOrErrorLocked();
+    if (ret != ACAMERA_OK) {
+        return ret;
+    }
+
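+    // Translate each app-provided output into an (ANW, OutputConfiguration)
+    // pair so it can be diffed against the currently configured streams below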
+    std::set<std::pair<ANativeWindow*, OutputConfiguration>> outputSet;
+    for (auto outConfig : outputs->mOutputs) {
+        ANativeWindow* anw = outConfig.mWindow;
+        sp<IGraphicBufferProducer> iGBP(nullptr);
+        ret = getIGBPfromAnw(anw, iGBP);
+        if (ret != ACAMERA_OK) {
+            return ret;
+        }
+        outputSet.insert(std::make_pair(
+                anw, OutputConfiguration(iGBP, outConfig.mRotation)));
+    }
+    auto addSet = outputSet;
+    std::vector<int> deleteList;
+
+    // Determine which streams need to be created, which to be deleted
+    for (auto& kvPair : mConfiguredOutputs) {
+        int streamId = kvPair.first;
+        auto& outputPair = kvPair.second;
+        if (outputSet.count(outputPair) == 0) {
+            deleteList.push_back(streamId); // Need to delete a no longer needed stream
+        } else {
+            addSet.erase(outputPair);        // No need to add already existing stream
+        }
+    }
+
+    ret = stopRepeatingLocked();
+    if (ret != ACAMERA_OK) {
+        ALOGE("Camera device %s stop repeating failed, ret %d", getId(), ret);
+        return ret;
+    }
+
+    ret = waitUntilIdleLocked();
+    if (ret != ACAMERA_OK) {
+        ALOGE("Camera device %s wait until idle failed, ret %d", getId(), ret);
+        return ret;
+    }
+
+    // Send onReady to the previous session.
+    // mCurrentSession will be updated after configureStreamsLocked returns, so
+    // here it is still the session about to be replaced by the new session
+    if (!mIdle && mCurrentSession != nullptr) {
+        if (mBusySession != mCurrentSession) {
+            ALOGE("Current session != busy session");
+            setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
+            return ACAMERA_ERROR_CAMERA_DEVICE;
+        }
+        sp<AMessage> msg = new AMessage(kWhatSessionStateCb, mHandler);
+        msg->setPointer(kContextKey, mBusySession->mUserSessionCallback.context);
+        msg->setObject(kSessionSpKey, mBusySession);
+        msg->setPointer(kCallbackFpKey, (void*) mBusySession->mUserSessionCallback.onReady);
+        mBusySession.clear();
+        msg->post();
+    }
+    mIdle = true;
+
+    binder::Status remoteRet = mRemote->beginConfigure();
+    if (!remoteRet.isOk()) {
+        ALOGE("Camera device %s begin configure failed: %s", getId(), remoteRet.toString8().string());
+        return ACAMERA_ERROR_UNKNOWN;
+    }
+
+    // delete to-be-deleted streams
+    for (auto streamId : deleteList) {
+        remoteRet = mRemote->deleteStream(streamId);
+        if (!remoteRet.isOk()) {
+            ALOGE("Camera device %s failed to remove stream %d: %s", getId(), streamId,
+                    remoteRet.toString8().string());
+            return ACAMERA_ERROR_UNKNOWN;
+        }
+        mConfiguredOutputs.erase(streamId);
+    }
+
+    // add new streams
+    for (auto outputPair : addSet) {
+        int streamId;
+        remoteRet = mRemote->createStream(outputPair.second, &streamId);
+        if (!remoteRet.isOk()) {
+            ALOGE("Camera device %s failed to create stream: %s", getId(),
+                    remoteRet.toString8().string());
+            return ACAMERA_ERROR_UNKNOWN;
+        }
+        mConfiguredOutputs.insert(std::make_pair(streamId, outputPair));
+    }
+
+    remoteRet = mRemote->endConfigure(/*isConstrainedHighSpeed*/ false);
+    if (remoteRet.serviceSpecificErrorCode() == hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT) {
+        ALOGE("Camera device %s cannnot support app output configuration: %s", getId(),
+                remoteRet.toString8().string());
+        return ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
+    } else if (!remoteRet.isOk()) {
+        ALOGE("Camera device %s end configure failed: %s", getId(), remoteRet.toString8().string());
+        return ACAMERA_ERROR_UNKNOWN;
+    }
+
+    return ACAMERA_OK;
+}
+
+void
+CameraDevice::setRemoteDevice(sp<hardware::camera2::ICameraDeviceUser> remote) {
+    Mutex::Autolock _l(mDeviceLock);
+    mRemote = remote;
+}
+
+camera_status_t
+CameraDevice::checkCameraClosedOrErrorLocked() const {
+    if (mRemote == nullptr) {
+        ALOGE("%s: camera device already closed", __FUNCTION__);
+        return ACAMERA_ERROR_CAMERA_DISCONNECTED;
+    }
+    if (mInError) { // triggered by onDeviceError
+        ALOGE("%s: camera device has encountered a serious error", __FUNCTION__);
+        return mError;
+    }
+    return ACAMERA_OK;
+}
+
+void
+CameraDevice::setCameraDeviceErrorLocked(camera_status_t error) {
+    mInError = true;
+    mError = error;
+    return;
+}
+
+void
+CameraDevice::FrameNumberTracker::updateTracker(int64_t frameNumber, bool isError) {
+    ALOGV("updateTracker frame %" PRId64 " isError %d", frameNumber, isError);
+    if (isError) {
+        mFutureErrorSet.insert(frameNumber);
+    } else if (frameNumber <= mCompletedFrameNumber) {
+        ALOGE("Frame number %" PRId64 " decreased! current fn %" PRId64,
+                frameNumber, mCompletedFrameNumber);
+        return;
+    } else {
+        if (frameNumber != mCompletedFrameNumber + 1) {
+            ALOGE("Frame number out of order. Expect %" PRId64 " but get %" PRId64,
+                    mCompletedFrameNumber + 1, frameNumber);
+            // Do not assert here, unlike the Java implementation
+        }
+        mCompletedFrameNumber = frameNumber;
+    }
+    update();
+}
+
+void
+CameraDevice::FrameNumberTracker::update() {
+    for (auto it = mFutureErrorSet.begin(); it != mFutureErrorSet.end();) {
+        int64_t errorFrameNumber = *it;
+        if (errorFrameNumber == mCompletedFrameNumber + 1) {
+            mCompletedFrameNumber++;
+            it = mFutureErrorSet.erase(it);
+        } else if (errorFrameNumber <= mCompletedFrameNumber) {
+            // This should not happen, but deal with it anyway
+            ALOGE("Completd frame number passed through current frame number!");
+            // erase the old error since it's no longer useful
+            it = mFutureErrorSet.erase(it);
+        } else {
+            // Normal requests haven't caught up to the error frames yet; just break
+            break;
+        }
+    }
+    ALOGV("Update complete frame %" PRId64, mCompletedFrameNumber);
+}
+
+void
+CameraDevice::onCaptureErrorLocked(
+        int32_t errorCode,
+        const CaptureResultExtras& resultExtras) {
+    int sequenceId = resultExtras.requestId;
+    int64_t frameNumber = resultExtras.frameNumber;
+    int32_t burstId = resultExtras.burstId;
+    auto it = mSequenceCallbackMap.find(sequenceId);
+    if (it == mSequenceCallbackMap.end()) {
+        ALOGE("%s: Error: capture sequence index %d not found!",
+                __FUNCTION__, sequenceId);
+        setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_SERVICE);
+        return;
+    }
+
+    CallbackHolder cbh = (*it).second;
+    sp<ACameraCaptureSession> session = cbh.mSession;
+    if ((size_t) burstId >= cbh.mRequests.size()) {
+        ALOGE("%s: Error: request index %d out of bound (size %zu)",
+                __FUNCTION__, burstId, cbh.mRequests.size());
+        setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_SERVICE);
+        return;
+    }
+    sp<CaptureRequest> request = cbh.mRequests[burstId];
+
+    // Handle buffer error
+    if (errorCode == hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER) {
+        int32_t streamId = resultExtras.errorStreamId;
+        ACameraCaptureSession_captureCallback_bufferLost onBufferLost =
+                cbh.mCallbacks.onCaptureBufferLost;
+        auto outputPairIt = mConfiguredOutputs.find(streamId);
+        if (outputPairIt == mConfiguredOutputs.end()) {
+            ALOGE("%s: Error: stream id %d does not exist", __FUNCTION__, streamId);
+            setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_SERVICE);
+            return;
+        }
+        ANativeWindow* anw = outputPairIt->second.first;
+
+        ALOGV("Camera %s Lost output buffer for ANW %p frame %" PRId64,
+                getId(), anw, frameNumber);
+
+        sp<AMessage> msg = new AMessage(kWhatCaptureBufferLost, mHandler);
+        msg->setPointer(kContextKey, cbh.mCallbacks.context);
+        msg->setObject(kSessionSpKey, session);
+        msg->setPointer(kCallbackFpKey, (void*) onBufferLost);
+        msg->setObject(kCaptureRequestKey, request);
+        msg->setPointer(kAnwKey, (void*) anw);
+        msg->setInt64(kFrameNumberKey, frameNumber);
+        msg->post();
+    } else { // Handle other capture failures
+        // Fire capture failure callback if there is one registered
+        ACameraCaptureSession_captureCallback_failed onError = cbh.mCallbacks.onCaptureFailed;
+        sp<CameraCaptureFailure> failure(new CameraCaptureFailure());
+        failure->frameNumber = frameNumber;
+        // TODO: refine this when implementing flush
+        failure->reason      = CAPTURE_FAILURE_REASON_ERROR;
+        failure->sequenceId  = sequenceId;
+        failure->wasImageCaptured = (errorCode ==
+                hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT);
+
+        sp<AMessage> msg = new AMessage(kWhatCaptureFail, mHandler);
+        msg->setPointer(kContextKey, cbh.mCallbacks.context);
+        msg->setObject(kSessionSpKey, session);
+        msg->setPointer(kCallbackFpKey, (void*) onError);
+        msg->setObject(kCaptureRequestKey, request);
+        msg->setObject(kCaptureFailureKey, failure);
+        msg->post();
+
+        // Update tracker
+        mFrameNumberTracker.updateTracker(frameNumber, /*isError*/true);
+        checkAndFireSequenceCompleteLocked();
+    }
+    return;
+}
+
+void CameraDevice::CallbackHandler::onMessageReceived(
+        const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatOnDisconnected:
+        case kWhatOnError:
+        case kWhatSessionStateCb:
+        case kWhatCaptureStart:
+        case kWhatCaptureResult:
+        case kWhatCaptureFail:
+        case kWhatCaptureSeqEnd:
+        case kWhatCaptureSeqAbort:
+        case kWhatCaptureBufferLost:
+            ALOGV("%s: Received msg %d", __FUNCTION__, msg->what());
+            break;
+        default:
+            ALOGE("%s:Error: unknown device callback %d", __FUNCTION__, msg->what());
+            return;
+    }
+    // Check the common part of all messages
+    void* context;
+    bool found = msg->findPointer(kContextKey, &context);
+    if (!found) {
+        ALOGE("%s: Cannot find callback context!", __FUNCTION__);
+        return;
+    }
+    switch (msg->what()) {
+        case kWhatOnDisconnected:
+        {
+            ACameraDevice* dev;
+            found = msg->findPointer(kDeviceKey, (void**) &dev);
+            if (!found || dev == nullptr) {
+                ALOGE("%s: Cannot find device pointer!", __FUNCTION__);
+                return;
+            }
+            ACameraDevice_StateCallback onDisconnected;
+            found = msg->findPointer(kCallbackFpKey, (void**) &onDisconnected);
+            if (!found) {
+                ALOGE("%s: Cannot find onDisconnected!", __FUNCTION__);
+                return;
+            }
+            if (onDisconnected == nullptr) {
+                return;
+            }
+            (*onDisconnected)(context, dev);
+            break;
+        }
+        case kWhatOnError:
+        {
+            ACameraDevice* dev;
+            found = msg->findPointer(kDeviceKey, (void**) &dev);
+            if (!found || dev == nullptr) {
+                ALOGE("%s: Cannot find device pointer!", __FUNCTION__);
+                return;
+            }
+            ACameraDevice_ErrorStateCallback onError;
+            found = msg->findPointer(kCallbackFpKey, (void**) &onError);
+            if (!found) {
+                ALOGE("%s: Cannot find onError!", __FUNCTION__);
+                return;
+            }
+            int errorCode;
+            found = msg->findInt32(kErrorCodeKey, &errorCode);
+            if (!found) {
+                ALOGE("%s: Cannot find error code!", __FUNCTION__);
+                return;
+            }
+            if (onError == nullptr) {
+                return;
+            }
+            (*onError)(context, dev, errorCode);
+            break;
+        }
+        case kWhatSessionStateCb:
+        case kWhatCaptureStart:
+        case kWhatCaptureResult:
+        case kWhatCaptureFail:
+        case kWhatCaptureSeqEnd:
+        case kWhatCaptureSeqAbort:
+        case kWhatCaptureBufferLost:
+        {
+            sp<RefBase> obj;
+            found = msg->findObject(kSessionSpKey, &obj);
+            if (!found || obj == nullptr) {
+                ALOGE("%s: Cannot find session pointer!", __FUNCTION__);
+                return;
+            }
+            sp<ACameraCaptureSession> session(static_cast<ACameraCaptureSession*>(obj.get()));
+            sp<CaptureRequest> requestSp = nullptr;
+            switch (msg->what()) {
+                case kWhatCaptureStart:
+                case kWhatCaptureResult:
+                case kWhatCaptureFail:
+                case kWhatCaptureBufferLost:
+                    found = msg->findObject(kCaptureRequestKey, &obj);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture request!", __FUNCTION__);
+                        return;
+                    }
+                    requestSp = static_cast<CaptureRequest*>(obj.get());
+                    break;
+            }
+
+            switch (msg->what()) {
+                case kWhatSessionStateCb:
+                {
+                    ACameraCaptureSession_stateCallback onState;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onState);
+                    if (!found) {
+                        ALOGE("%s: Cannot find state callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onState == nullptr) {
+                        return;
+                    }
+                    (*onState)(context, session.get());
+                    break;
+                }
+                case kWhatCaptureStart:
+                {
+                    ACameraCaptureSession_captureCallback_start onStart;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onStart);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture start callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onStart == nullptr) {
+                        return;
+                    }
+                    int64_t timestamp;
+                    found = msg->findInt64(kTimeStampKey, &timestamp);
+                    if (!found) {
+                        ALOGE("%s: Cannot find timestamp!", __FUNCTION__);
+                        return;
+                    }
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp);
+                    (*onStart)(context, session.get(), request, timestamp);
+                    freeACaptureRequest(request);
+                    break;
+                }
+                case kWhatCaptureResult:
+                {
+                    ACameraCaptureSession_captureCallback_result onResult;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onResult);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture result callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onResult == nullptr) {
+                        return;
+                    }
+
+                    found = msg->findObject(kCaptureResultKey, &obj);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture result!", __FUNCTION__);
+                        return;
+                    }
+                    sp<ACameraMetadata> result(static_cast<ACameraMetadata*>(obj.get()));
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp);
+                    (*onResult)(context, session.get(), request, result.get());
+                    freeACaptureRequest(request);
+                    break;
+                }
+                case kWhatCaptureFail:
+                {
+                    ACameraCaptureSession_captureCallback_failed onFail;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onFail);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture fail callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onFail == nullptr) {
+                        return;
+                    }
+
+                    found = msg->findObject(kCaptureFailureKey, &obj);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture failure!", __FUNCTION__);
+                        return;
+                    }
+                    sp<CameraCaptureFailure> failureSp(
+                            static_cast<CameraCaptureFailure*>(obj.get()));
+                    ACameraCaptureFailure* failure =
+                            static_cast<ACameraCaptureFailure*>(failureSp.get());
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp);
+                    (*onFail)(context, session.get(), request, failure);
+                    freeACaptureRequest(request);
+                    // failure is owned by failureSp; deleting it manually here
+                    // would cause a double free when failureSp is released
+                    break;
+                }
+                case kWhatCaptureSeqEnd:
+                {
+                    ACameraCaptureSession_captureCallback_sequenceEnd onSeqEnd;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onSeqEnd);
+                    if (!found) {
+                        ALOGE("%s: Cannot find sequence end callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onSeqEnd == nullptr) {
+                        return;
+                    }
+                    int seqId;
+                    found = msg->findInt32(kSequenceIdKey, &seqId);
+                    if (!found) {
+                        ALOGE("%s: Cannot find frame number!", __FUNCTION__);
+                        return;
+                    }
+                    int64_t frameNumber;
+                    found = msg->findInt64(kFrameNumberKey, &frameNumber);
+                    if (!found) {
+                        ALOGE("%s: Cannot find frame number!", __FUNCTION__);
+                        return;
+                    }
+                    (*onSeqEnd)(context, session.get(), seqId, frameNumber);
+                    break;
+                }
+                case kWhatCaptureSeqAbort:
+                {
+                    ACameraCaptureSession_captureCallback_sequenceAbort onSeqAbort;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onSeqAbort);
+                    if (!found) {
+                        ALOGE("%s: Cannot find sequence end callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onSeqAbort == nullptr) {
+                        return;
+                    }
+                    int seqId;
+                    found = msg->findInt32(kSequenceIdKey, &seqId);
+                    if (!found) {
+                        ALOGE("%s: Cannot find frame number!", __FUNCTION__);
+                        return;
+                    }
+                    (*onSeqAbort)(context, session.get(), seqId);
+                    break;
+                }
+                case kWhatCaptureBufferLost:
+                {
+                    ACameraCaptureSession_captureCallback_bufferLost onBufferLost;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onBufferLost);
+                    if (!found) {
+                        ALOGE("%s: Cannot find buffer lost callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onBufferLost == nullptr) {
+                        return;
+                    }
+
+                    ANativeWindow* anw;
+                    found = msg->findPointer(kAnwKey, (void**) &anw);
+                    if (!found) {
+                        ALOGE("%s: Cannot find ANativeWindow!", __FUNCTION__);
+                        return;
+                    }
+
+                    int64_t frameNumber;
+                    found = msg->findInt64(kFrameNumberKey, &frameNumber);
+                    if (!found) {
+                        ALOGE("%s: Cannot find frame number!", __FUNCTION__);
+                        return;
+                    }
+
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp);
+                    (*onBufferLost)(context, session.get(), request, anw, frameNumber);
+                    freeACaptureRequest(request);
+                    break;
+                }
+            }
+            break;
+        }
+    }
+}
+
+CameraDevice::CallbackHolder::CallbackHolder(
+    sp<ACameraCaptureSession>          session,
+    const Vector<sp<CaptureRequest> >& requests,
+    bool                               isRepeating,
+    ACameraCaptureSession_captureCallbacks* cbs) :
+    mSession(session), mRequests(requests),
+    mIsRepeating(isRepeating), mCallbacks(fillCb(cbs)) {}
+
+void
+CameraDevice::checkRepeatingSequenceCompleteLocked(
+    const int sequenceId, const int64_t lastFrameNumber) {
+    ALOGV("Repeating seqId %d lastFrameNumer %" PRId64, sequenceId, lastFrameNumber);
+    if (lastFrameNumber == NO_FRAMES_CAPTURED) {
+        if (mSequenceCallbackMap.count(sequenceId) == 0) {
+            ALOGW("No callback found for sequenceId %d", sequenceId);
+            return;
+        }
+        // remove callback holder from callback map
+        auto cbIt = mSequenceCallbackMap.find(sequenceId);
+        CallbackHolder cbh = cbIt->second;
+        mSequenceCallbackMap.erase(cbIt);
+        // send seq aborted callback
+        sp<AMessage> msg = new AMessage(kWhatCaptureSeqAbort, mHandler);
+        msg->setPointer(kContextKey, cbh.mCallbacks.context);
+        msg->setObject(kSessionSpKey, cbh.mSession);
+        msg->setPointer(kCallbackFpKey, (void*) cbh.mCallbacks.onCaptureSequenceAborted);
+        msg->setInt32(kSequenceIdKey, sequenceId);
+        msg->post();
+    } else {
+        // Use mSequenceLastFrameNumberMap to track when this sequence completes
+        mSequenceLastFrameNumberMap.insert(std::make_pair(sequenceId, lastFrameNumber));
+
+        // Last frame might have arrived. Check now
+        checkAndFireSequenceCompleteLocked();
+    }
+}
+
+void
+CameraDevice::checkAndFireSequenceCompleteLocked() {
+    int64_t completedFrameNumber = mFrameNumberTracker.getCompletedFrameNumber();
+    //std::map<int, int64_t> mSequenceLastFrameNumberMap;
+    auto it = mSequenceLastFrameNumberMap.begin();
+    while (it != mSequenceLastFrameNumberMap.end()) {
+        int sequenceId = it->first;
+        int64_t lastFrameNumber = it->second;
+        bool seqCompleted = false;
+        bool hasCallback  = true;
+
+        if (mRemote == nullptr) {
+            ALOGW("Camera %s closed while checking sequence complete", getId());
+            return;
+        }
+
+        // Check if there is a callback registered for this sequence.
+        // This should not happen because we always register a callback holder
+        // (even when the app passes in nullptr callbacks)
+        if (mSequenceCallbackMap.count(sequenceId) == 0) {
+            ALOGW("No callback found for sequenceId %d", sequenceId);
+            hasCallback = false;
+        }
+
+        if (lastFrameNumber <= completedFrameNumber) {
+            ALOGV("seq %d reached last frame %" PRId64 ", completed %" PRId64,
+                    sequenceId, lastFrameNumber, completedFrameNumber);
+            seqCompleted = true;
+        }
+
+        if (seqCompleted && hasCallback) {
+            // remove callback holder from callback map
+            auto cbIt = mSequenceCallbackMap.find(sequenceId);
+            CallbackHolder cbh = cbIt->second;
+            mSequenceCallbackMap.erase(cbIt);
+            // send seq complete callback
+            sp<AMessage> msg = new AMessage(kWhatCaptureSeqEnd, mHandler);
+            msg->setPointer(kContextKey, cbh.mCallbacks.context);
+            msg->setObject(kSessionSpKey, cbh.mSession);
+            msg->setPointer(kCallbackFpKey, (void*) cbh.mCallbacks.onCaptureSequenceCompleted);
+            msg->setInt32(kSequenceIdKey, sequenceId);
+            msg->setInt64(kFrameNumberKey, lastFrameNumber);
+
+            // Clear the session sp before we send out the message. This covers
+            // the rare case where the message is processed before cbh goes out
+            // of scope; otherwise cbh could drop the last reference here and run
+            // the session destructor while holding the device lock
+            cbh.mSession.clear();
+            msg->post();
+        }
+
+        // No need to track sequence complete if there is no callback registered
+        if (seqCompleted || !hasCallback) {
+            it = mSequenceLastFrameNumberMap.erase(it);
+        } else {
+            ++it;
+        }
+    }
+}
+
+/**
+ * Camera service callback implementation
+ */
+binder::Status
+CameraDevice::ServiceCallback::onDeviceError(
+        int32_t errorCode,
+        const CaptureResultExtras& resultExtras) {
+    ALOGD("Device error received, code %d, frame number %" PRId64 ", request ID %d, subseq ID %d",
+            errorCode, resultExtras.frameNumber, resultExtras.requestId, resultExtras.burstId);
+    binder::Status ret = binder::Status::ok();
+    sp<CameraDevice> dev = mDevice.promote();
+    if (dev == nullptr) {
+        return ret; // device has been closed
+    }
+
+    Mutex::Autolock _l(dev->mDeviceLock);
+    if (dev->mRemote == nullptr) {
+        return ret; // device has been closed
+    }
+    switch (errorCode) {
+        case ERROR_CAMERA_DISCONNECTED:
+        {
+            // Camera is disconnected, close the session and expect no more callbacks
+            if (dev->mCurrentSession != nullptr) {
+                dev->mCurrentSession->closeByDevice();
+                dev->mCurrentSession = nullptr;
+            }
+            sp<AMessage> msg = new AMessage(kWhatOnDisconnected, dev->mHandler);
+            msg->setPointer(kContextKey, dev->mAppCallbacks.context);
+            msg->setPointer(kDeviceKey, (void*) dev->getWrapper());
+            msg->setPointer(kCallbackFpKey, (void*) dev->mAppCallbacks.onDisconnected);
+            msg->post();
+            break;
+        }
+        default:
+            ALOGE("Unknown error from camera device: %d", errorCode);
+            // fall through: treat unknown errors as fatal device errors
+        case ERROR_CAMERA_DEVICE:
+        case ERROR_CAMERA_SERVICE:
+        {
+            switch (errorCode) {
+                case ERROR_CAMERA_DEVICE:
+                    dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
+                    break;
+                case ERROR_CAMERA_SERVICE:
+                    dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_SERVICE);
+                    break;
+                default:
+                    dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_UNKNOWN);
+                    break;
+            }
+            sp<AMessage> msg = new AMessage(kWhatOnError, dev->mHandler);
+            msg->setPointer(kContextKey, dev->mAppCallbacks.context);
+            msg->setPointer(kDeviceKey, (void*) dev->getWrapper());
+            msg->setPointer(kCallbackFpKey, (void*) dev->mAppCallbacks.onError);
+            msg->setInt32(kErrorCodeKey, errorCode);
+            msg->post();
+            break;
+        }
+        case ERROR_CAMERA_REQUEST:
+        case ERROR_CAMERA_RESULT:
+        case ERROR_CAMERA_BUFFER:
+            dev->onCaptureErrorLocked(errorCode, resultExtras);
+            break;
+    }
+    return ret;
+}
+
+binder::Status
+CameraDevice::ServiceCallback::onDeviceIdle() {
+    ALOGV("Camera is now idle");
+    binder::Status ret = binder::Status::ok();
+    sp<CameraDevice> dev = mDevice.promote();
+    if (dev == nullptr) {
+        return ret; // device has been closed
+    }
+
+    Mutex::Autolock _l(dev->mDeviceLock);
+    if (dev->isClosed() || dev->mRemote == nullptr) {
+        return ret;
+    }
+
+    if (dev->mIdle) {
+        // Already in idle state. Possibly another thread called waitUntilIdle
+        return ret;
+    }
+
+    if (dev->mCurrentSession != nullptr) {
+        ALOGE("onDeviceIdle sending state cb");
+        if (dev->mBusySession != dev->mCurrentSession) {
+            ALOGE("Current session != busy session");
+            dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
+            return ret;
+        }
+        sp<AMessage> msg = new AMessage(kWhatSessionStateCb, dev->mHandler);
+        msg->setPointer(kContextKey, dev->mBusySession->mUserSessionCallback.context);
+        msg->setObject(kSessionSpKey, dev->mBusySession);
+        msg->setPointer(kCallbackFpKey, (void*) dev->mBusySession->mUserSessionCallback.onReady);
+        // Make sure we clear the sp first so the session destructor can only
+        // run on the handler thread (where we don't hold the device/session lock)
+        dev->mBusySession.clear();
+        msg->post();
+    }
+    dev->mIdle = true;
+    dev->mFlushing = false;
+    return ret;
+}
+
+binder::Status
+CameraDevice::ServiceCallback::onCaptureStarted(
+        const CaptureResultExtras& resultExtras,
+        int64_t timestamp) {
+    binder::Status ret = binder::Status::ok();
+
+    sp<CameraDevice> dev = mDevice.promote();
+    if (dev == nullptr) {
+        return ret; // device has been closed
+    }
+    Mutex::Autolock _l(dev->mDeviceLock);
+    if (dev->isClosed() || dev->mRemote == nullptr) {
+        return ret;
+    }
+
+    int sequenceId = resultExtras.requestId;
+    int32_t burstId = resultExtras.burstId;
+
+    auto it = dev->mSequenceCallbackMap.find(sequenceId);
+    if (it != dev->mSequenceCallbackMap.end()) {
+        CallbackHolder cbh = (*it).second;
+        ACameraCaptureSession_captureCallback_start onStart = cbh.mCallbacks.onCaptureStarted;
+        sp<ACameraCaptureSession> session = cbh.mSession;
+        if ((size_t) burstId >= cbh.mRequests.size()) {
+            ALOGE("%s: Error: request index %d out of bound (size %zu)",
+                    __FUNCTION__, burstId, cbh.mRequests.size());
+            dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_SERVICE);
+        }
+        sp<CaptureRequest> request = cbh.mRequests[burstId];
+        sp<AMessage> msg = new AMessage(kWhatCaptureStart, dev->mHandler);
+        msg->setPointer(kContextKey, cbh.mCallbacks.context);
+        msg->setObject(kSessionSpKey, session);
+        msg->setPointer(kCallbackFpKey, (void*) onStart);
+        msg->setObject(kCaptureRequestKey, request);
+        msg->setInt64(kTimeStampKey, timestamp);
+        msg->post();
+    }
+    return ret;
+}
+
+binder::Status
+CameraDevice::ServiceCallback::onResultReceived(
+        const CameraMetadata& metadata,
+        const CaptureResultExtras& resultExtras) {
+    binder::Status ret = binder::Status::ok();
+
+    sp<CameraDevice> dev = mDevice.promote();
+    if (dev == nullptr) {
+        return ret; // device has been closed
+    }
+    int sequenceId = resultExtras.requestId;
+    int64_t frameNumber = resultExtras.frameNumber;
+    int32_t burstId = resultExtras.burstId;
+    bool    isPartialResult = (resultExtras.partialResultCount < dev->mPartialResultCount);
+
+    if (!isPartialResult) {
+        ALOGV("SeqId %d frame %" PRId64 " result arrive.", sequenceId, frameNumber);
+    }
+
+    Mutex::Autolock _l(dev->mDeviceLock);
+    if (dev->mRemote == nullptr) {
+        return ret; // device has been disconnected
+    }
+
+    if (dev->isClosed()) {
+        if (!isPartialResult) {
+            dev->mFrameNumberTracker.updateTracker(frameNumber, /*isError*/false);
+        }
+        // Early return to avoid sending callbacks to a closed device
+        return ret;
+    }
+
+    CameraMetadata metadataCopy = metadata;
+    metadataCopy.update(ANDROID_LENS_INFO_SHADING_MAP_SIZE, dev->mShadingMapSize, /*data_count*/2);
+    metadataCopy.update(ANDROID_SYNC_FRAME_NUMBER, &frameNumber, /*data_count*/1);
+
+    auto it = dev->mSequenceCallbackMap.find(sequenceId);
+    if (it != dev->mSequenceCallbackMap.end()) {
+        CallbackHolder cbh = (*it).second;
+        ACameraCaptureSession_captureCallback_result onResult = isPartialResult ?
+                cbh.mCallbacks.onCaptureProgressed :
+                cbh.mCallbacks.onCaptureCompleted;
+        sp<ACameraCaptureSession> session = cbh.mSession;
+        if ((size_t) burstId >= cbh.mRequests.size()) {
+            ALOGE("%s: Error: request index %d out of bound (size %zu)",
+                    __FUNCTION__, burstId, cbh.mRequests.size());
+            dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_SERVICE);
+        }
+        sp<CaptureRequest> request = cbh.mRequests[burstId];
+        sp<ACameraMetadata> result(new ACameraMetadata(
+                metadataCopy.release(), ACameraMetadata::ACM_RESULT));
+
+        sp<AMessage> msg = new AMessage(kWhatCaptureResult, dev->mHandler);
+        msg->setPointer(kContextKey, cbh.mCallbacks.context);
+        msg->setObject(kSessionSpKey, session);
+        msg->setPointer(kCallbackFpKey, (void*) onResult);
+        msg->setObject(kCaptureRequestKey, request);
+        msg->setObject(kCaptureResultKey, result);
+        msg->post();
+    }
+
+    if (!isPartialResult) {
+        dev->mFrameNumberTracker.updateTracker(frameNumber, /*isError*/false);
+        dev->checkAndFireSequenceCompleteLocked();
+    }
+
+    return ret;
+}
+
+binder::Status
+CameraDevice::ServiceCallback::onPrepared(int) {
+    // Prepare not yet implemented in NDK
+    return binder::Status::ok();
+}
+
+binder::Status
+CameraDevice::ServiceCallback::onRepeatingRequestError(int64_t lastFrameNumber) {
+    binder::Status ret = binder::Status::ok();
+
+    sp<CameraDevice> dev = mDevice.promote();
+    if (dev == nullptr) {
+        return ret; // device has been closed
+    }
+
+    Mutex::Autolock _l(dev->mDeviceLock);
+
+    int repeatingSequenceId = dev->mRepeatingSequenceId;
+    dev->mRepeatingSequenceId = REQUEST_ID_NONE;
+
+    dev->checkRepeatingSequenceCompleteLocked(repeatingSequenceId, lastFrameNumber);
+
+    return ret;
+}
+
+
+} // namespace android
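As a standalone illustration (not part of this change) of the FrameNumberTracker bookkeeping above: capture results complete in frame-number order, but errors can be reported for frames ahead of the completed counter, so those error frame numbers are parked in mFutureErrorSet and drained once they become contiguous. A simplified, runnable mirror of that logic (it omits the decreasing/out-of-order warnings the real class logs):

    #include <cassert>
    #include <cstdint>
    #include <set>

    // Simplified mirror of CameraDevice::FrameNumberTracker, illustration only.
    struct TrackerSketch {
        int64_t completed = -1;            // highest contiguous completed frame
        std::set<int64_t> futureErrors;    // errored frames ahead of 'completed'

        void update(int64_t frameNumber, bool isError) {
            if (isError) {
                futureErrors.insert(frameNumber);
            } else if (frameNumber > completed) {
                completed = frameNumber;   // normal results arrive in order
            }
            // Drain parked errors that are now contiguous with 'completed'
            auto it = futureErrors.begin();
            while (it != futureErrors.end() && *it == completed + 1) {
                completed++;
                it = futureErrors.erase(it);
            }
        }
    };

    int main() {
        TrackerSketch t;
        t.update(0, false);
        t.update(1, false);
        t.update(3, true);   // error for frame 3 arrives before frame 2 completes
        assert(t.completed == 1);
        t.update(2, false);  // frame 2 completes; frame 3's error drains too
        assert(t.completed == 3);
        return 0;
    }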
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
new file mode 100644
index 0000000..71e364d
--- /dev/null
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _ACAMERA_DEVICE_H
+#define _ACAMERA_DEVICE_H
+
+#include <memory>
+#include <map>
+#include <set>
+#include <atomic>
+#include <utility>
+#include <utils/StrongPointer.h>
+#include <utils/Mutex.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+#include <utils/Vector.h>
+
+#include <android/hardware/camera2/BnCameraDeviceCallbacks.h>
+#include <android/hardware/camera2/ICameraDeviceUser.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <camera/CaptureResult.h>
+#include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/CaptureRequest.h>
+
+#include <NdkCameraDevice.h>
+#include "ACameraMetadata.h"
+
+namespace android {
+
+// Wrap ACameraCaptureFailure so it can be ref-counted
+struct CameraCaptureFailure : public RefBase, public ACameraCaptureFailure {};
+
+class CameraDevice final : public RefBase {
+  public:
+    CameraDevice(const char* id, ACameraDevice_StateCallbacks* cb,
+                  std::unique_ptr<ACameraMetadata> chars,
+                  ACameraDevice* wrapper);
+    ~CameraDevice();
+
+    inline const char* getId() const { return mCameraId.string(); }
+
+    camera_status_t createCaptureRequest(
+            ACameraDevice_request_template templateId,
+            ACaptureRequest** request) const;
+
+    camera_status_t createCaptureSession(
+            const ACaptureSessionOutputContainer*       outputs,
+            const ACameraCaptureSession_stateCallbacks* callbacks,
+            /*out*/ACameraCaptureSession** session);
+
+    // Callbacks from camera service
+    class ServiceCallback : public hardware::camera2::BnCameraDeviceCallbacks {
+      public:
+        ServiceCallback(CameraDevice* device) : mDevice(device) {}
+        binder::Status onDeviceError(int32_t errorCode,
+                           const CaptureResultExtras& resultExtras) override;
+        binder::Status onDeviceIdle() override;
+        binder::Status onCaptureStarted(const CaptureResultExtras& resultExtras,
+                              int64_t timestamp) override;
+        binder::Status onResultReceived(const CameraMetadata& metadata,
+                              const CaptureResultExtras& resultExtras) override;
+        binder::Status onPrepared(int streamId) override;
+        binder::Status onRepeatingRequestError(int64_t lastFrameNumber) override;
+      private:
+        const wp<CameraDevice> mDevice;
+    };
+    inline sp<hardware::camera2::ICameraDeviceCallbacks> getServiceCallback() {
+        return mServiceCallback;
+    };
+
+    // Camera device is only functional after the remote device is set
+    void setRemoteDevice(sp<hardware::camera2::ICameraDeviceUser> remote);
+
+    inline ACameraDevice* getWrapper() const { return mWrapper; };
+
+  private:
+    friend ACameraCaptureSession;
+    camera_status_t checkCameraClosedOrErrorLocked() const;
+
+    // device goes into fatal error state after this
+    void setCameraDeviceErrorLocked(camera_status_t error);
+
+    void disconnectLocked(); // disconnect from camera service
+
+    camera_status_t stopRepeatingLocked();
+
+    camera_status_t flushLocked(ACameraCaptureSession*);
+
+    camera_status_t waitUntilIdleLocked();
+
+
+    camera_status_t captureLocked(sp<ACameraCaptureSession> session,
+            /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+            int numRequests, ACaptureRequest** requests,
+            /*optional*/int* captureSequenceId);
+
+    camera_status_t setRepeatingRequestsLocked(sp<ACameraCaptureSession> session,
+            /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+            int numRequests, ACaptureRequest** requests,
+            /*optional*/int* captureSequenceId);
+
+    camera_status_t submitRequestsLocked(
+            sp<ACameraCaptureSession> session,
+            /*optional*/ACameraCaptureSession_captureCallbacks* cbs,
+            int numRequests, ACaptureRequest** requests,
+            /*out*/int* captureSequenceId,
+            bool isRepeating);
+
+    static camera_status_t allocateCaptureRequest(
+            const ACaptureRequest* request, sp<CaptureRequest>& outReq);
+
+    static ACaptureRequest* allocateACaptureRequest(sp<CaptureRequest>& req);
+    static void freeACaptureRequest(ACaptureRequest*);
+
+    // Only for the capture session to hold the device lock.
+    // Always grab the device lock before grabbing the session lock
+    void lockDeviceForSessionOps() const { mDeviceLock.lock(); };
+    void unlockDevice() const { mDeviceLock.unlock(); };
+
+    // For capture session to notify its end of life
+    void notifySessionEndOfLifeLocked(ACameraCaptureSession* session);
+
+    camera_status_t configureStreamsLocked(const ACaptureSessionOutputContainer* outputs);
+
+    static camera_status_t getIGBPfromAnw(
+            ANativeWindow* anw, sp<IGraphicBufferProducer>& out);
+
+    static camera_status_t getSurfaceFromANativeWindow(
+            ANativeWindow* anw, sp<Surface>& out);
+
+    mutable Mutex mDeviceLock;
+    const String8 mCameraId;                          // Camera ID
+    const ACameraDevice_StateCallbacks mAppCallbacks; // Callback to app
+    const std::unique_ptr<ACameraMetadata> mChars;    // Camera characteristics
+    const sp<ServiceCallback> mServiceCallback;
+    ACameraDevice* mWrapper;
+
+    // stream id -> pair of (ANW* from application, OutputConfiguration used for camera service)
+    std::map<int, std::pair<ANativeWindow*, OutputConfiguration>> mConfiguredOutputs;
+
+    // TODO: maybe a bool will suffice for synchronous implementation?
+    std::atomic_bool mClosing;
+    inline bool isClosed() { return mClosing; }
+
+    bool mInError = false;
+    camera_status_t mError = ACAMERA_OK;
+    void onCaptureErrorLocked(
+            int32_t errorCode,
+            const CaptureResultExtras& resultExtras);
+
+    bool mIdle = true;
+    // Prevents a busy session from being deleted before it returns to the idle state
+    sp<ACameraCaptureSession> mBusySession;
+
+    sp<hardware::camera2::ICameraDeviceUser> mRemote;
+
+    // Looper thread to handle callbacks to the app
+    sp<ALooper> mCbLooper;
+    // Definition of the handler and its message types
+    enum {
+        // Device state callbacks
+        kWhatOnDisconnected,   // onDisconnected
+        kWhatOnError,          // onError
+        // Session state callbacks
+        kWhatSessionStateCb,   // onReady, onActive
+        // Capture callbacks
+        kWhatCaptureStart,     // onCaptureStarted
+        kWhatCaptureResult,    // onCaptureProgressed, onCaptureCompleted
+        kWhatCaptureFail,      // onCaptureFailed
+        kWhatCaptureSeqEnd,    // onCaptureSequenceCompleted
+        kWhatCaptureSeqAbort,  // onCaptureSequenceAborted
+        kWhatCaptureBufferLost // onCaptureBufferLost
+    };
+    static const char* kContextKey;
+    static const char* kDeviceKey;
+    static const char* kErrorCodeKey;
+    static const char* kCallbackFpKey;
+    static const char* kSessionSpKey;
+    static const char* kCaptureRequestKey;
+    static const char* kTimeStampKey;
+    static const char* kCaptureResultKey;
+    static const char* kCaptureFailureKey;
+    static const char* kSequenceIdKey;
+    static const char* kFrameNumberKey;
+    static const char* kAnwKey;
+    class CallbackHandler : public AHandler {
+      public:
+        CallbackHandler() {}
+        void onMessageReceived(const sp<AMessage> &msg) override;
+    };
+    sp<CallbackHandler> mHandler;
+
+    /***********************************
+     * Capture session related members *
+     ***********************************/
+    // The current active session
+    ACameraCaptureSession* mCurrentSession = nullptr;
+    bool mFlushing = false;
+
+    int mNextSessionId = 0;
+    // TODO: might need another looper/handler to handle callbacks from service
+
+    static const int REQUEST_ID_NONE = -1;
+    int mRepeatingSequenceId = REQUEST_ID_NONE;
+
+    // sequence id -> last frame number map
+    std::map<int, int64_t> mSequenceLastFrameNumberMap;
+
+    struct CallbackHolder {
+        CallbackHolder(sp<ACameraCaptureSession>          session,
+                       const Vector<sp<CaptureRequest> >& requests,
+                       bool                               isRepeating,
+                       ACameraCaptureSession_captureCallbacks* cbs);
+
+        static ACameraCaptureSession_captureCallbacks fillCb(
+                ACameraCaptureSession_captureCallbacks* cbs) {
+            if (cbs != nullptr) {
+                return *cbs;
+            }
+            return { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
+        }
+
+        sp<ACameraCaptureSession>   mSession;
+        Vector<sp<CaptureRequest> > mRequests;
+        const bool                  mIsRepeating;
+        ACameraCaptureSession_captureCallbacks mCallbacks;
+    };
+    // sequence id -> callbacks map
+    std::map<int, CallbackHolder> mSequenceCallbackMap;
+
+    static const int64_t NO_FRAMES_CAPTURED = -1;
+    class FrameNumberTracker {
+      public:
+        // Called from onResultReceived and onCaptureErrorLocked
+        void updateTracker(int64_t frameNumber, bool isError);
+        inline int64_t getCompletedFrameNumber() { return mCompletedFrameNumber; }
+      private:
+        void update();
+        void updateCompletedFrameNumber(int64_t frameNumber);
+
+        int64_t mCompletedFrameNumber = NO_FRAMES_CAPTURED;
+        List<int64_t> mSkippedFrameNumbers;
+        std::set<int64_t> mFutureErrorSet;
+    };
+    FrameNumberTracker mFrameNumberTracker;
+
+    void checkRepeatingSequenceCompleteLocked(const int sequenceId, const int64_t lastFrameNumber);
+    void checkAndFireSequenceCompleteLocked();
+
+    // Misc variables
+    int32_t mShadingMapSize[2];   // const after constructor
+    int32_t mPartialResultCount;  // const after constructor
+
+};
+
+} // namespace android
+
+/**
+ * ACameraDevice opaque struct definition
+ * Left outside the android namespace because it is an NDK struct
+ */
+struct ACameraDevice {
+    ACameraDevice(const char* id, ACameraDevice_StateCallbacks* cb,
+                  std::unique_ptr<ACameraMetadata> chars) :
+            mDevice(new CameraDevice(id, cb, std::move(chars), this)) {}
+
+    ~ACameraDevice() {};
+
+    /*******************
+     * NDK public APIs *
+     *******************/
+    inline const char* getId() const { return mDevice->getId(); }
+
+    camera_status_t createCaptureRequest(
+            ACameraDevice_request_template templateId,
+            ACaptureRequest** request) const {
+        return mDevice->createCaptureRequest(templateId, request);
+    }
+
+    camera_status_t createCaptureSession(
+            const ACaptureSessionOutputContainer*       outputs,
+            const ACameraCaptureSession_stateCallbacks* callbacks,
+            /*out*/ACameraCaptureSession** session) {
+        return mDevice->createCaptureSession(outputs, callbacks, session);
+    }
+
+    /************************
+     * Device internal APIs *
+     ************************/
+    inline android::sp<android::hardware::camera2::ICameraDeviceCallbacks> getServiceCallback() {
+        return mDevice->getServiceCallback();
+    };
+
+    // The camera device is only functional after the remote device has been set
+    inline void setRemoteDevice(android::sp<android::hardware::camera2::ICameraDeviceUser> remote) {
+        mDevice->setRemoteDevice(remote);
+    }
+
+  private:
+    android::sp<android::CameraDevice> mDevice;
+};
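+
+/*
+ * Illustrative sketch of how this wrapper is reached from the public NDK
+ * API (not part of this change; error handling omitted, and `manager` and
+ * `stateCallbacks` are assumed to exist):
+ *
+ *   ACameraDevice* device = nullptr;
+ *   ACameraManager_openCamera(manager, "0", &stateCallbacks, &device);
+ *   ACaptureRequest* request = nullptr;
+ *   ACameraDevice_createCaptureRequest(device, TEMPLATE_PREVIEW, &request);
+ */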
+
+#endif // _ACAMERA_DEVICE_H
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
new file mode 100644
index 0000000..26d6679
--- /dev/null
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -0,0 +1,525 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ACameraManager"
+
+#include <memory>
+#include "ACameraManager.h"
+#include "ACameraMetadata.h"
+#include "ACameraDevice.h"
+#include <utils/Vector.h>
+#include <stdlib.h>
+#include <camera/VendorTagDescriptor.h>
+
+using namespace android;
+
+// Constants shared between ACameraManager and CameraManagerGlobal
+namespace {
+    const int kMaxCameraIdLen = 32;
+}
+
+namespace android {
+// Static member definitions
+const char* CameraManagerGlobal::kCameraIdKey   = "CameraId";
+const char* CameraManagerGlobal::kCallbackFpKey = "CallbackFp";
+const char* CameraManagerGlobal::kContextKey    = "CallbackContext";
+Mutex                CameraManagerGlobal::sLock;
+CameraManagerGlobal* CameraManagerGlobal::sInstance = nullptr;
+
+CameraManagerGlobal&
+CameraManagerGlobal::getInstance() {
+    Mutex::Autolock _l(sLock);
+    CameraManagerGlobal* instance = sInstance;
+    if (instance == nullptr) {
+        instance = new CameraManagerGlobal();
+        sInstance = instance;
+    }
+    return *instance;
+}
+
+CameraManagerGlobal::~CameraManagerGlobal() {
+    // clear sInstance so next getInstance call knows to create a new one
+    Mutex::Autolock _sl(sLock);
+    sInstance = nullptr;
+    Mutex::Autolock _l(mLock);
+    if (mCameraService != nullptr) {
+        IInterface::asBinder(mCameraService)->unlinkToDeath(mDeathNotifier);
+        mCameraService->removeListener(mCameraServiceListener);
+    }
+    mDeathNotifier.clear();
+    if (mCbLooper != nullptr) {
+        mCbLooper->unregisterHandler(mHandler->id());
+        mCbLooper->stop();
+    }
+    mCbLooper.clear();
+    mHandler.clear();
+    mCameraServiceListener.clear();
+    mCameraService.clear();
+}
+
+sp<hardware::ICameraService> CameraManagerGlobal::getCameraService() {
+    Mutex::Autolock _l(mLock);
+    if (mCameraService.get() == nullptr) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder;
+        do {
+            binder = sm->getService(String16(kCameraServiceName));
+            if (binder != nullptr) {
+                break;
+            }
+            ALOGW("CameraService not published, waiting...");
+            usleep(kCameraServicePollDelay);
+        } while(true);
+        if (mDeathNotifier == nullptr) {
+            mDeathNotifier = new DeathNotifier(this);
+        }
+        binder->linkToDeath(mDeathNotifier);
+        mCameraService = interface_cast<hardware::ICameraService>(binder);
+
+        // Set up the looper thread that performs availability callbacks
+        if (mCbLooper == nullptr) {
+            mCbLooper = new ALooper;
+            mCbLooper->setName("C2N-mgr-looper");
+            status_t err = mCbLooper->start(
+                    /*runOnCallingThread*/false,
+                    /*canCallJava*/       true,
+                    PRIORITY_DEFAULT);
+            if (err != OK) {
+                ALOGE("%s: Unable to start camera service listener looper: %s (%d)",
+                        __FUNCTION__, strerror(-err), err);
+                mCbLooper.clear();
+                return nullptr;
+            }
+            if (mHandler == nullptr) {
+                mHandler = new CallbackHandler();
+            }
+            mCbLooper->registerHandler(mHandler);
+        }
+
+        // register ICameraServiceListener
+        if (mCameraServiceListener == nullptr) {
+            mCameraServiceListener = new CameraServiceListener(this);
+        }
+        mCameraService->addListener(mCameraServiceListener);
+
+        // setup vendor tags
+        sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
+        binder::Status ret = mCameraService->getCameraVendorTagDescriptor(/*out*/desc.get());
+
+        if (ret.isOk()) {
+            status_t err = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+            if (err != OK) {
+                ALOGE("%s: Failed to set vendor tag descriptors, received error %s (%d)",
+                        __FUNCTION__, strerror(-err), err);
+            }
+        } else if (ret.serviceSpecificErrorCode() ==
+                hardware::ICameraService::ERROR_DEPRECATED_HAL) {
+            ALOGW("%s: Camera HAL too old; does not support vendor tags",
+                    __FUNCTION__);
+            VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+        } else {
+            ALOGE("%s: Failed to get vendor tag descriptors: %s",
+                    __FUNCTION__, ret.toString8().string());
+        }
+    }
+    ALOGE_IF(mCameraService == nullptr, "no CameraService!?");
+    return mCameraService;
+}
+
+void CameraManagerGlobal::DeathNotifier::binderDied(const wp<IBinder>&)
+{
+    ALOGE("Camera service binderDied!");
+    sp<CameraManagerGlobal> cm = mCameraManager.promote();
+    if (cm != nullptr) {
+        AutoMutex lock(cm->mLock);
+        for (auto pair : cm->mDeviceStatusMap) {
+            int32_t cameraId = pair.first;
+            cm->onStatusChangedLocked(
+                    CameraServiceListener::STATUS_NOT_PRESENT, cameraId);
+        }
+        cm->mCameraService.clear();
+        // TODO: consider adding re-connect call here?
+    }
+}
+
+void CameraManagerGlobal::registerAvailabilityCallback(
+        const ACameraManager_AvailabilityCallbacks *callback) {
+    Mutex::Autolock _l(mLock);
+    Callback cb(callback);
+    auto pair = mCallbacks.insert(cb);
+    // Send initial callbacks if this callback was newly registered
+    if (pair.second) {
+        for (auto pair : mDeviceStatusMap) {
+            int32_t cameraId = pair.first;
+            int32_t status = pair.second;
+
+            sp<AMessage> msg = new AMessage(kWhatSendSingleCallback, mHandler);
+            ACameraManager_AvailabilityCallback cb = isStatusAvailable(status) ?
+                    callback->onCameraAvailable : callback->onCameraUnavailable;
+            msg->setPointer(kCallbackFpKey, (void *) cb);
+            msg->setPointer(kContextKey, callback->context);
+            msg->setInt32(kCameraIdKey, cameraId);
+            msg->post();
+        }
+    }
+}
+
+void CameraManagerGlobal::unregisterAvailabilityCallback(
+        const ACameraManager_AvailabilityCallbacks *callback) {
+    Mutex::Autolock _l(mLock);
+    Callback cb(callback);
+    mCallbacks.erase(cb);
+}
+
+bool CameraManagerGlobal::validStatus(int32_t status) {
+    switch (status) {
+        case hardware::ICameraServiceListener::STATUS_NOT_PRESENT:
+        case hardware::ICameraServiceListener::STATUS_PRESENT:
+        case hardware::ICameraServiceListener::STATUS_ENUMERATING:
+        case hardware::ICameraServiceListener::STATUS_NOT_AVAILABLE:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool CameraManagerGlobal::isStatusAvailable(int32_t status) {
+    switch (status) {
+        case hardware::ICameraServiceListener::STATUS_PRESENT:
+            return true;
+        default:
+            return false;
+    }
+}
+
+void CameraManagerGlobal::CallbackHandler::sendSingleCallback(
+        int32_t cameraId, void* context,
+        ACameraManager_AvailabilityCallback cb) const {
+    char cameraIdStr[kMaxCameraIdLen];
+    snprintf(cameraIdStr, sizeof(cameraIdStr), "%d", cameraId);
+    (*cb)(context, cameraIdStr);
+}
+
+void CameraManagerGlobal::CallbackHandler::onMessageReceived(
+        const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatSendSingleCallback:
+        {
+            ACameraManager_AvailabilityCallback cb;
+            void* context;
+            int32_t cameraId;
+            bool found = msg->findPointer(kCallbackFpKey, (void**) &cb);
+            if (!found) {
+                ALOGE("%s: Cannot find camera callback fp!", __FUNCTION__);
+                return;
+            }
+            found = msg->findPointer(kContextKey, &context);
+            if (!found) {
+                ALOGE("%s: Cannot find callback context!", __FUNCTION__);
+                return;
+            }
+            found = msg->findInt32(kCameraIdKey, &cameraId);
+            if (!found) {
+                ALOGE("%s: Cannot find camera ID!", __FUNCTION__);
+                return;
+            }
+            sendSingleCallback(cameraId, context, cb);
+            break;
+        }
+        default:
+            ALOGE("%s: unknown message type %d", __FUNCTION__, msg->what());
+            break;
+    }
+}
+
+binder::Status CameraManagerGlobal::CameraServiceListener::onStatusChanged(
+        int32_t status, int32_t cameraId) {
+    sp<CameraManagerGlobal> cm = mCameraManager.promote();
+    if (cm != nullptr) {
+        cm->onStatusChanged(status, cameraId);
+    } else {
+        ALOGE("Cannot deliver status change. Global camera manager died");
+    }
+    return binder::Status::ok();
+}
+
+void CameraManagerGlobal::onStatusChanged(
+        int32_t status, int32_t cameraId) {
+    Mutex::Autolock _l(mLock);
+    onStatusChangedLocked(status, cameraId);
+}
+
+void CameraManagerGlobal::onStatusChangedLocked(
+        int32_t status, int32_t cameraId) {
+    if (!validStatus(status)) {
+        ALOGE("%s: Invalid status %d", __FUNCTION__, status);
+        return;
+    }
+
+    bool firstStatus = (mDeviceStatusMap.count(cameraId) == 0);
+    int32_t oldStatus = firstStatus ?
+            status : // first status
+            mDeviceStatusMap[cameraId];
+
+    if (!firstStatus &&
+            isStatusAvailable(status) == isStatusAvailable(oldStatus)) {
+        // No status update. No need to send callback
+        return;
+    }
+
+    // Iterate through all registered callbacks
+    mDeviceStatusMap[cameraId] = status;
+    for (auto cb : mCallbacks) {
+        sp<AMessage> msg = new AMessage(kWhatSendSingleCallback, mHandler);
+        ACameraManager_AvailabilityCallback cbFp = isStatusAvailable(status) ?
+                cb.mAvailable : cb.mUnavailable;
+        msg->setPointer(kCallbackFpKey, (void *) cbFp);
+        msg->setPointer(kContextKey, cb.mContext);
+        msg->setInt32(kCameraIdKey, cameraId);
+        msg->post();
+    }
+}
+
+} // namespace android
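+
+/*
+ * Illustrative sketch of the NDK-side flow served by the callback plumbing
+ * above (not part of this change; error handling omitted, and the callback
+ * function names are caller-chosen):
+ *
+ *   void onAvailable(void* context, const char* cameraId) { ... }
+ *   void onUnavailable(void* context, const char* cameraId) { ... }
+ *
+ *   ACameraManager_AvailabilityCallbacks cbs = {
+ *           myContext, onAvailable, onUnavailable };
+ *   ACameraManager_registerAvailabilityCallback(manager, &cbs);
+ */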
+
+/**
+ * ACameraManager Implementation
+ */
+camera_status_t
+ACameraManager::getOrCreateCameraIdListLocked(ACameraIdList** cameraIdList) {
+    if (mCachedCameraIdList.numCameras == kCameraIdListNotInit) {
+        int numCameras = 0;
+        Vector<char *> cameraIds;
+        sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
+        if (cs == nullptr) {
+            ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
+            return ACAMERA_ERROR_CAMERA_DISCONNECTED;
+        }
+        // Get number of cameras
+        int numAllCameras = 0;
+        binder::Status serviceRet = cs->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_ALL,
+                &numAllCameras);
+        if (!serviceRet.isOk()) {
+            ALOGE("%s: Error getting camera count: %s", __FUNCTION__,
+                    serviceRet.toString8().string());
+            numAllCameras = 0;
+        }
+        // Filter API2 compatible cameras and push to cameraIds
+        for (int i = 0; i < numAllCameras; i++) {
+            // TODO: Only support HALs that support API2 directly for now
+            bool camera2Support = false;
+            serviceRet = cs->supportsCameraApi(i, hardware::ICameraService::API_VERSION_2,
+                    &camera2Support);
+            char buf[kMaxCameraIdLen];
+            if (camera2Support) {
+                numCameras++;
+                mCameraIds.insert(i);
+                snprintf(buf, sizeof(buf), "%d", i);
+                size_t cameraIdSize = strlen(buf) + 1;
+                char *cameraId = new char[cameraIdSize];
+                if (!cameraId) {
+                    ALOGE("Allocate memory for ACameraIdList failed!");
+                    return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
+                }
+                strlcpy(cameraId, buf, cameraIdSize);
+                cameraIds.push(cameraId);
+            }
+        }
+        mCachedCameraIdList.numCameras = numCameras;
+        mCachedCameraIdList.cameraIds = new const char*[numCameras];
+        if (!mCachedCameraIdList.cameraIds) {
+            ALOGE("Allocate memory for ACameraIdList failed!");
+            return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
+        }
+        for (int i = 0; i < numCameras; i++) {
+            mCachedCameraIdList.cameraIds[i] = cameraIds[i];
+        }
+    }
+    *cameraIdList = &mCachedCameraIdList;
+    return ACAMERA_OK;
+}
+
+camera_status_t
+ACameraManager::getCameraIdList(ACameraIdList** cameraIdList) {
+    Mutex::Autolock _l(mLock);
+    ACameraIdList* cachedList;
+    camera_status_t ret = getOrCreateCameraIdListLocked(&cachedList);
+    if (ret != ACAMERA_OK) {
+        ALOGE("Get camera ID list failed! err: %d", ret);
+        return ret;
+    }
+
+    int numCameras = cachedList->numCameras;
+    ACameraIdList *out = new ACameraIdList;
+    if (!out) {
+        ALOGE("Allocate memory for ACameraIdList failed!");
+        return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
+    }
+    out->numCameras = numCameras;
+    out->cameraIds = new const char*[numCameras];
+    if (!out->cameraIds) {
+        ALOGE("Allocate memory for ACameraIdList failed!");
+        return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
+    }
+    for (int i = 0; i < numCameras; i++) {
+        const char* src = cachedList->cameraIds[i];
+        size_t dstSize = strlen(src) + 1;
+        char* dst = new char[dstSize];
+        if (!dst) {
+            ALOGE("Allocate memory for ACameraIdList failed!");
+            return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
+        }
+        strlcpy(dst, src, dstSize);
+        out->cameraIds[i] = dst;
+    }
+    *cameraIdList = out;
+    return ACAMERA_OK;
+}
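+
+/*
+ * Illustrative NDK-side usage of the ID list APIs above (sketch only, not
+ * part of this change; `manager` is an assumed ACameraManager*):
+ *
+ *   ACameraIdList* idList = nullptr;
+ *   if (ACameraManager_getCameraIdList(manager, &idList) == ACAMERA_OK) {
+ *       for (int i = 0; i < idList->numCameras; i++) {
+ *           ALOGI("found camera %s", idList->cameraIds[i]);
+ *       }
+ *       ACameraManager_deleteCameraIdList(idList);
+ *   }
+ */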
+
+void
+ACameraManager::deleteCameraIdList(ACameraIdList* cameraIdList) {
+    if (cameraIdList != nullptr) {
+        if (cameraIdList->cameraIds != nullptr) {
+            for (int i = 0; i < cameraIdList->numCameras; i ++) {
+                delete[] cameraIdList->cameraIds[i];
+            }
+            delete[] cameraIdList->cameraIds;
+        }
+        delete cameraIdList;
+    }
+}
+
+camera_status_t ACameraManager::getCameraCharacteristics(
+        const char *cameraIdStr, ACameraMetadata **characteristics) {
+    Mutex::Autolock _l(mLock);
+    ACameraIdList* cachedList;
+    // Make sure mCameraIds is initialized
+    camera_status_t ret = getOrCreateCameraIdListLocked(&cachedList);
+    if (ret != ACAMERA_OK) {
+        ALOGE("%s: Get camera ID list failed! err: %d", __FUNCTION__, ret);
+        return ret;
+    }
+    int cameraId = atoi(cameraIdStr);
+    if (mCameraIds.count(cameraId) == 0) {
+        ALOGE("%s: Camera ID %s does not exist!", __FUNCTION__, cameraIdStr);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
+    if (cs == nullptr) {
+        ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
+        return ACAMERA_ERROR_CAMERA_DISCONNECTED;
+    }
+    CameraMetadata rawMetadata;
+    binder::Status serviceRet = cs->getCameraCharacteristics(cameraId, &rawMetadata);
+    if (!serviceRet.isOk()) {
+        ALOGE("Get camera characteristics from camera service failed: %s",
+                serviceRet.toString8().string());
+        return ACAMERA_ERROR_UNKNOWN; // should not reach here
+    }
+
+    *characteristics = new ACameraMetadata(
+            rawMetadata.release(), ACameraMetadata::ACM_CHARACTERISTICS);
+    return ACAMERA_OK;
+}
+
+camera_status_t
+ACameraManager::openCamera(
+        const char* cameraId,
+        ACameraDevice_StateCallbacks* callback,
+        /*out*/ACameraDevice** outDevice) {
+    ACameraMetadata* rawChars;
+    camera_status_t ret = getCameraCharacteristics(cameraId, &rawChars);
+    Mutex::Autolock _l(mLock);
+    if (ret != ACAMERA_OK) {
+        ALOGE("%s: cannot get camera characteristics for camera %s. err %d",
+                __FUNCTION__, cameraId, ret);
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+    std::unique_ptr<ACameraMetadata> chars(rawChars);
+    rawChars = nullptr;
+
+    ACameraDevice* device = new ACameraDevice(cameraId, callback, std::move(chars));
+
+    sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
+    if (cs == nullptr) {
+        ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
+        return ACAMERA_ERROR_CAMERA_DISCONNECTED;
+    }
+
+    int id = atoi(cameraId);
+    sp<hardware::camera2::ICameraDeviceCallbacks> callbacks = device->getServiceCallback();
+    sp<hardware::camera2::ICameraDeviceUser> deviceRemote;
+    // There is no way to get the package name from native code, so send a
+    // zero-length package name and let the camera service derive it from the UID
+    binder::Status serviceRet = cs->connectDevice(
+            callbacks, id, String16(""),
+            hardware::ICameraService::USE_CALLING_UID, /*out*/&deviceRemote);
+
+    if (!serviceRet.isOk()) {
+        ALOGE("%s: connect camera device failed: %s", __FUNCTION__, serviceRet.toString8().string());
+        // Convert serviceRet to camera_status_t
+        switch(serviceRet.serviceSpecificErrorCode()) {
+            case hardware::ICameraService::ERROR_DISCONNECTED:
+                ret = ACAMERA_ERROR_CAMERA_DISCONNECTED;
+                break;
+            case hardware::ICameraService::ERROR_CAMERA_IN_USE:
+                ret = ACAMERA_ERROR_CAMERA_IN_USE;
+                break;
+            case hardware::ICameraService::ERROR_MAX_CAMERAS_IN_USE:
+                ret = ACAMERA_ERROR_MAX_CAMERA_IN_USE;
+                break;
+            case hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT:
+                ret = ACAMERA_ERROR_INVALID_PARAMETER;
+                break;
+            case hardware::ICameraService::ERROR_DEPRECATED_HAL:
+                // Should not reach here since we filtered legacy HALs earlier
+                ret = ACAMERA_ERROR_INVALID_PARAMETER;
+                break;
+            case hardware::ICameraService::ERROR_DISABLED:
+                ret = ACAMERA_ERROR_CAMERA_DISABLED;
+                break;
+            case hardware::ICameraService::ERROR_PERMISSION_DENIED:
+                ret = ACAMERA_ERROR_PERMISSION_DENIED;
+                break;
+            case hardware::ICameraService::ERROR_INVALID_OPERATION:
+            default:
+                ret = ACAMERA_ERROR_UNKNOWN;
+                break;
+        }
+
+        delete device;
+        return ret;
+    }
+    if (deviceRemote == nullptr) {
+        ALOGE("%s: connect camera device failed! remote device is null", __FUNCTION__);
+        delete device;
+        return ACAMERA_ERROR_CAMERA_DISCONNECTED;
+    }
+    device->setRemoteDevice(deviceRemote);
+    *outDevice = device;
+    return ACAMERA_OK;
+}
+
+ACameraManager::~ACameraManager() {
+    Mutex::Autolock _l(mLock);
+    if (mCachedCameraIdList.numCameras != kCameraIdListNotInit) {
+        for (int i = 0; i < mCachedCameraIdList.numCameras; i++) {
+            delete[] mCachedCameraIdList.cameraIds[i];
+        }
+        delete[] mCachedCameraIdList.cameraIds;
+    }
+}
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
new file mode 100644
index 0000000..3f2262f
--- /dev/null
+++ b/camera/ndk/impl/ACameraManager.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _ACAMERA_MANAGER_H
+#define _ACAMERA_MANAGER_H
+
+#include "NdkCameraManager.h"
+
+#include <android/hardware/ICameraService.h>
+#include <android/hardware/BnCameraServiceListener.h>
+#include <camera/CameraMetadata.h>
+#include <binder/IServiceManager.h>
+#include <utils/StrongPointer.h>
+#include <utils/Mutex.h>
+
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include <set>
+#include <map>
+
+namespace android {
+
+/**
+ * Per-process singleton camera manager instance, shared by all ACameraManager
+ * instances. Created when the first ACameraManager is created and destroyed
+ * when all ACameraManager instances are deleted.
+ *
+ * TODO: maybe CameraManagerGlobal is better suited to live in libcameraclient?
+ */
+class CameraManagerGlobal final : public RefBase {
+  public:
+    static CameraManagerGlobal& getInstance();
+    sp<hardware::ICameraService> getCameraService();
+
+    void registerAvailabilityCallback(
+            const ACameraManager_AvailabilityCallbacks *callback);
+    void unregisterAvailabilityCallback(
+            const ACameraManager_AvailabilityCallbacks *callback);
+
+  private:
+    sp<hardware::ICameraService> mCameraService;
+    const int          kCameraServicePollDelay = 500000; // 0.5s
+    const char*        kCameraServiceName      = "media.camera";
+    Mutex              mLock;
+
+    class DeathNotifier : public IBinder::DeathRecipient {
+      public:
+        DeathNotifier(CameraManagerGlobal* cm) : mCameraManager(cm) {}
+      protected:
+        // IBinder::DeathRecipient implementation
+        virtual void binderDied(const wp<IBinder>& who);
+      private:
+        const wp<CameraManagerGlobal> mCameraManager;
+    };
+    sp<DeathNotifier> mDeathNotifier;
+
+    class CameraServiceListener final : public hardware::BnCameraServiceListener {
+      public:
+        CameraServiceListener(CameraManagerGlobal* cm) : mCameraManager(cm) {}
+        virtual binder::Status onStatusChanged(int32_t status, int32_t cameraId);
+
+        // Torch API not implemented yet
+        virtual binder::Status onTorchStatusChanged(int32_t, const String16&) {
+            return binder::Status::ok();
+        }
+
+      private:
+        const wp<CameraManagerGlobal> mCameraManager;
+    };
+    sp<CameraServiceListener> mCameraServiceListener;
+
+    // Wrapper around ACameraManager_AvailabilityCallbacks so it can be stored in a std::set
+    struct Callback {
+        Callback(const ACameraManager_AvailabilityCallbacks *callback) :
+            mAvailable(callback->onCameraAvailable),
+            mUnavailable(callback->onCameraUnavailable),
+            mContext(callback->context) {}
+
+        bool operator == (const Callback& other) const {
+            return (mAvailable == other.mAvailable &&
+                    mUnavailable == other.mUnavailable &&
+                    mContext == other.mContext);
+        }
+        bool operator != (const Callback& other) const {
+            return !(*this == other);
+        }
+        bool operator < (const Callback& other) const {
+            if (*this == other) return false;
+            if (mContext != other.mContext) return mContext < other.mContext;
+            if (mAvailable != other.mAvailable) return mAvailable < other.mAvailable;
+            return mUnavailable < other.mUnavailable;
+        }
+        bool operator > (const Callback& other) const {
+            return (*this != other && !(*this < other));
+        }
+        ACameraManager_AvailabilityCallback mAvailable;
+        ACameraManager_AvailabilityCallback mUnavailable;
+        void*                               mContext;
+    };
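+    // Note: Callback::operator< orders by (context, available fp, unavailable
+    // fp), giving std::set the strict weak ordering it needs for storage and
+    // for the duplicate detection done in registerAvailabilityCallback.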
+    std::set<Callback> mCallbacks;
+
+    // Definition of the handler and its message types
+    enum {
+        kWhatSendSingleCallback
+    };
+    static const char* kCameraIdKey;
+    static const char* kCallbackFpKey;
+    static const char* kContextKey;
+    class CallbackHandler : public AHandler {
+      public:
+        CallbackHandler() {}
+        void onMessageReceived(const sp<AMessage> &msg) override;
+      private:
+        inline void sendSingleCallback(
+                int32_t cameraId, void* context,
+                ACameraManager_AvailabilityCallback cb) const;
+    };
+    sp<CallbackHandler> mHandler;
+    sp<ALooper>         mCbLooper; // Looper thread on which callbacks actually run
+
+    void onStatusChanged(int32_t status, int32_t cameraId);
+    void onStatusChangedLocked(int32_t status, int32_t cameraId);
+    // Utils for status
+    static bool validStatus(int32_t status);
+    static bool isStatusAvailable(int32_t status);
+
+    // Map camera_id -> status
+    std::map<int32_t, int32_t> mDeviceStatusMap;
+
+    // For the singleton instance
+    static Mutex sLock;
+    static CameraManagerGlobal* sInstance;
+    CameraManagerGlobal() {};
+    ~CameraManagerGlobal();
+};
+
+} // namespace android
+
+/**
+ * ACameraManager opaque struct definition
+ * Left outside the android namespace because it is an NDK struct
+ */
+struct ACameraManager {
+    ACameraManager() :
+            mCachedCameraIdList({kCameraIdListNotInit, nullptr}),
+            mGlobalManager(&(android::CameraManagerGlobal::getInstance())) {}
+    ~ACameraManager();
+    camera_status_t getCameraIdList(ACameraIdList** cameraIdList);
+    static void     deleteCameraIdList(ACameraIdList* cameraIdList);
+
+    camera_status_t getCameraCharacteristics(
+            const char *cameraId, ACameraMetadata **characteristics);
+    camera_status_t openCamera(const char* cameraId,
+                               ACameraDevice_StateCallbacks* callback,
+                               /*out*/ACameraDevice** device);
+
+  private:
+    camera_status_t getOrCreateCameraIdListLocked(ACameraIdList** cameraIdList);
+
+    enum {
+        kCameraIdListNotInit = -1
+    };
+    android::Mutex         mLock;
+    std::set<int> mCameraIds;          // Init by getOrCreateCameraIdListLocked
+    ACameraIdList mCachedCameraIdList; // Init by getOrCreateCameraIdListLocked
+    android::sp<android::CameraManagerGlobal> mGlobalManager;
+};
+
+#endif //_ACAMERA_MANAGER_H
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
new file mode 100644
index 0000000..ab99e38
--- /dev/null
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -0,0 +1,365 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ACameraMetadata"
+
+#include "ACameraMetadata.h"
+#include <utils/Vector.h>
+#include <system/graphics.h>
+#include "NdkImage.h"
+
+using namespace android;
+
+/**
+ * ACameraMetadata Implementation
+ */
+ACameraMetadata::ACameraMetadata(camera_metadata_t* buffer, ACAMERA_METADATA_TYPE type) :
+        mData(buffer), mType(type) {
+    if (mType == ACM_CHARACTERISTICS) {
+        filterUnsupportedFeatures();
+        filterStreamConfigurations();
+    }
+    // TODO: filter request/result keys
+}
+
+bool
+ACameraMetadata::isNdkSupportedCapability(int32_t capability) {
+    switch (capability) {
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE:
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR:
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING:
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW:
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS:
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE:
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT:
+            return true;
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING:
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING:
+        case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO:
+            return false;
+        default:
+            // Newly defined capabilities will be unsupported by default (blacklist)
+            // TODO: Should we do whitelist or blacklist here?
+            ALOGE("%s: Unknonwn capability %d", __FUNCTION__, capability);
+            return false;
+    }
+}
+
+void
+ACameraMetadata::filterUnsupportedFeatures() {
+    // Hide unsupported capabilities (reprocessing)
+    camera_metadata_entry entry = mData.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    if (entry.count == 0 || entry.type != TYPE_BYTE) {
+        ALOGE("%s: malformed available capability key! count %zu, type %d",
+                __FUNCTION__, entry.count, entry.type);
+        return;
+    }
+
+    Vector<uint8_t> capabilities;
+    capabilities.setCapacity(entry.count);
+    for (size_t i = 0; i < entry.count; i++) {
+        uint8_t capability = entry.data.u8[i];
+        if (isNdkSupportedCapability(capability)) {
+            capabilities.push(capability);
+        }
+    }
+    mData.update(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, capabilities);
+}
+
+
+void
+ACameraMetadata::filterStreamConfigurations() {
+    const int STREAM_CONFIGURATION_SIZE = 4;
+    const int STREAM_FORMAT_OFFSET = 0;
+    const int STREAM_WIDTH_OFFSET = 1;
+    const int STREAM_HEIGHT_OFFSET = 2;
+    const int STREAM_IS_INPUT_OFFSET = 3;
+    camera_metadata_entry entry = mData.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+    if (entry.count == 0 || entry.count % STREAM_CONFIGURATION_SIZE || entry.type != TYPE_INT32) {
+        ALOGE("%s: malformed available stream configuration key! count %zu, type %d",
+                __FUNCTION__, entry.count, entry.type);
+        return;
+    }
+
+    Vector<int32_t> filteredStreamConfigs;
+    filteredStreamConfigs.setCapacity(entry.count);
+
+    for (size_t i=0; i < entry.count; i += STREAM_CONFIGURATION_SIZE) {
+        int32_t format = entry.data.i32[i + STREAM_FORMAT_OFFSET];
+        int32_t width = entry.data.i32[i + STREAM_WIDTH_OFFSET];
+        int32_t height = entry.data.i32[i + STREAM_HEIGHT_OFFSET];
+        int32_t isInput = entry.data.i32[i + STREAM_IS_INPUT_OFFSET];
+        if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT) {
+            // Hide input streams
+            continue;
+        }
+        // Translate HAL formats to NDK format
+        if (format == HAL_PIXEL_FORMAT_BLOB) {
+            format = AIMAGE_FORMAT_JPEG;
+        }
+        filteredStreamConfigs.push_back(format);
+        filteredStreamConfigs.push_back(width);
+        filteredStreamConfigs.push_back(height);
+        filteredStreamConfigs.push_back(isInput);
+    }
+
+    mData.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, filteredStreamConfigs);
+
+    entry = mData.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
+    if (entry.count % STREAM_CONFIGURATION_SIZE) {
+        return; // skip malformed depth configurations rather than reading past the end
+    }
+    Vector<int32_t> filteredDepthStreamConfigs;
+    filteredDepthStreamConfigs.setCapacity(entry.count);
+
+    for (size_t i=0; i < entry.count; i += STREAM_CONFIGURATION_SIZE) {
+        int32_t format = entry.data.i32[i + STREAM_FORMAT_OFFSET];
+        int32_t width = entry.data.i32[i + STREAM_WIDTH_OFFSET];
+        int32_t height = entry.data.i32[i + STREAM_HEIGHT_OFFSET];
+        int32_t isInput = entry.data.i32[i + STREAM_IS_INPUT_OFFSET];
+        if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT) {
+            // Hide input streams
+            continue;
+        }
+        // Translate HAL formats to NDK format
+        if (format == HAL_PIXEL_FORMAT_BLOB) {
+            format = AIMAGE_FORMAT_DEPTH_POINT_CLOUD;
+        } else if (format == HAL_PIXEL_FORMAT_Y16) {
+            format = AIMAGE_FORMAT_DEPTH16;
+        }
+
+        filteredDepthStreamConfigs.push_back(format);
+        filteredDepthStreamConfigs.push_back(width);
+        filteredDepthStreamConfigs.push_back(height);
+        filteredDepthStreamConfigs.push_back(isInput);
+    }
+    mData.update(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, filteredDepthStreamConfigs);
+}
+
+bool
+ACameraMetadata::isVendorTag(const uint32_t tag) {
+    uint32_t tag_section = tag >> 16;
+    if (tag_section >= VENDOR_SECTION) {
+        return true;
+    }
+    return false;
+}
+
+camera_status_t
+ACameraMetadata::getConstEntry(uint32_t tag, ACameraMetadata_const_entry* entry) const {
+    if (entry == nullptr) {
+        return ACAMERA_ERROR_INVALID_PARAMETER;
+    }
+
+    Mutex::Autolock _l(mLock);
+
+    camera_metadata_ro_entry rawEntry = mData.find(tag);
+    if (rawEntry.count == 0) {
+        ALOGE("%s: cannot find metadata tag %d", __FUNCTION__, tag);
+        return ACAMERA_ERROR_METADATA_NOT_FOUND;
+    }
+    entry->tag = tag;
+    entry->type = rawEntry.type;
+    entry->count = rawEntry.count;
+    entry->data.u8 = rawEntry.data.u8;
+    return ACAMERA_OK;
+}
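+
+/*
+ * Illustrative NDK-side read of an entry via the lookup above (sketch only,
+ * not part of this change; `chars` is an assumed ACameraMetadata*):
+ *
+ *   ACameraMetadata_const_entry entry;
+ *   if (ACameraMetadata_getConstEntry(chars, ACAMERA_SENSOR_ORIENTATION,
+ *           &entry) == ACAMERA_OK) {
+ *       int32_t orientation = entry.data.i32[0];
+ *   }
+ */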
+
+camera_status_t
+ACameraMetadata::update(uint32_t tag, uint32_t count, const uint8_t* data) {
+    return updateImpl<uint8_t>(tag, count, data);
+}
+
+camera_status_t
+ACameraMetadata::update(uint32_t tag, uint32_t count, const int32_t* data) {
+    return updateImpl<int32_t>(tag, count, data);
+}
+
+camera_status_t
+ACameraMetadata::update(uint32_t tag, uint32_t count, const float* data) {
+    return updateImpl<float>(tag, count, data);
+}
+
+camera_status_t
+ACameraMetadata::update(uint32_t tag, uint32_t count, const double* data) {
+    return updateImpl<double>(tag, count, data);
+}
+
+camera_status_t
+ACameraMetadata::update(uint32_t tag, uint32_t count, const int64_t* data) {
+    return updateImpl<int64_t>(tag, count, data);
+}
+
+camera_status_t
+ACameraMetadata::update(uint32_t tag, uint32_t count, const ACameraMetadata_rational* data) {
+    return updateImpl<camera_metadata_rational_t>(tag, count, data);
+}
+
+camera_status_t
+ACameraMetadata::getTags(/*out*/int32_t* numTags,
+                         /*out*/const uint32_t** tags) const {
+    Mutex::Autolock _l(mLock);
+    if (mTags.size() == 0) {
+        size_t entry_count = mData.entryCount();
+        mTags.setCapacity(entry_count);
+        const camera_metadata_t* rawMetadata = mData.getAndLock();
+        for (size_t i = 0; i < entry_count; i++) {
+            camera_metadata_ro_entry_t entry;
+            int ret = get_camera_metadata_ro_entry(rawMetadata, i, &entry);
+            if (ret != 0) {
+                ALOGE("%s: error reading metadata index %zu", __FUNCTION__, i);
+                mData.unlock(rawMetadata); // release the buffer locked by getAndLock above
+                return ACAMERA_ERROR_UNKNOWN;
+            }
+            // Hide system keys from users
+            if (sSystemTags.count(entry.tag) == 0) {
+                mTags.push_back(entry.tag);
+            }
+        }
+        mData.unlock(rawMetadata);
+    }
+
+    *numTags = mTags.size();
+    *tags = mTags.array();
+    return ACAMERA_OK;
+}
+
+const CameraMetadata&
+ACameraMetadata::getInternalData() {
+    return mData;
+}
+
+// TODO: some of the keys below should be hidden from the user
+// ex: ACAMERA_REQUEST_ID and ACAMERA_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR
+/*@O~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~
+ * The key entries below this point are generated from metadata
+ * definitions in /system/media/camera/docs. Do not modify by hand or
+ * modify the comment blocks at the start or end.
+ *~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~*/
+
+bool
+ACameraMetadata::isCaptureRequestTag(const uint32_t tag) {
+    // Skip check for vendor keys
+    if (isVendorTag(tag)) {
+        return true;
+    }
+
+    switch (tag) {
+        case ACAMERA_COLOR_CORRECTION_MODE:
+        case ACAMERA_COLOR_CORRECTION_TRANSFORM:
+        case ACAMERA_COLOR_CORRECTION_GAINS:
+        case ACAMERA_COLOR_CORRECTION_ABERRATION_MODE:
+        case ACAMERA_CONTROL_AE_ANTIBANDING_MODE:
+        case ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION:
+        case ACAMERA_CONTROL_AE_LOCK:
+        case ACAMERA_CONTROL_AE_MODE:
+        case ACAMERA_CONTROL_AE_REGIONS:
+        case ACAMERA_CONTROL_AE_TARGET_FPS_RANGE:
+        case ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER:
+        case ACAMERA_CONTROL_AF_MODE:
+        case ACAMERA_CONTROL_AF_REGIONS:
+        case ACAMERA_CONTROL_AF_TRIGGER:
+        case ACAMERA_CONTROL_AWB_LOCK:
+        case ACAMERA_CONTROL_AWB_MODE:
+        case ACAMERA_CONTROL_AWB_REGIONS:
+        case ACAMERA_CONTROL_CAPTURE_INTENT:
+        case ACAMERA_CONTROL_EFFECT_MODE:
+        case ACAMERA_CONTROL_MODE:
+        case ACAMERA_CONTROL_SCENE_MODE:
+        case ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE:
+        case ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST:
+        case ACAMERA_EDGE_MODE:
+        case ACAMERA_FLASH_MODE:
+        case ACAMERA_HOT_PIXEL_MODE:
+        case ACAMERA_JPEG_GPS_COORDINATES:
+        case ACAMERA_JPEG_GPS_PROCESSING_METHOD:
+        case ACAMERA_JPEG_GPS_TIMESTAMP:
+        case ACAMERA_JPEG_ORIENTATION:
+        case ACAMERA_JPEG_QUALITY:
+        case ACAMERA_JPEG_THUMBNAIL_QUALITY:
+        case ACAMERA_JPEG_THUMBNAIL_SIZE:
+        case ACAMERA_LENS_APERTURE:
+        case ACAMERA_LENS_FILTER_DENSITY:
+        case ACAMERA_LENS_FOCAL_LENGTH:
+        case ACAMERA_LENS_FOCUS_DISTANCE:
+        case ACAMERA_LENS_OPTICAL_STABILIZATION_MODE:
+        case ACAMERA_NOISE_REDUCTION_MODE:
+        case ACAMERA_SCALER_CROP_REGION:
+        case ACAMERA_SENSOR_EXPOSURE_TIME:
+        case ACAMERA_SENSOR_FRAME_DURATION:
+        case ACAMERA_SENSOR_SENSITIVITY:
+        case ACAMERA_SENSOR_TEST_PATTERN_DATA:
+        case ACAMERA_SENSOR_TEST_PATTERN_MODE:
+        case ACAMERA_SHADING_MODE:
+        case ACAMERA_STATISTICS_FACE_DETECT_MODE:
+        case ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE:
+        case ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE:
+        case ACAMERA_TONEMAP_CURVE_BLUE:
+        case ACAMERA_TONEMAP_CURVE_GREEN:
+        case ACAMERA_TONEMAP_CURVE_RED:
+        case ACAMERA_TONEMAP_MODE:
+        case ACAMERA_TONEMAP_GAMMA:
+        case ACAMERA_TONEMAP_PRESET_CURVE:
+        case ACAMERA_BLACK_LEVEL_LOCK:
+            return true;
+        default:
+            return false;
+    }
+}
+
+// System tags that should be hidden from users
+std::unordered_set<uint32_t> ACameraMetadata::sSystemTags ({
+    ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
+    ANDROID_CONTROL_AE_PRECAPTURE_ID,
+    ANDROID_CONTROL_AF_TRIGGER_ID,
+    ANDROID_DEMOSAIC_MODE,
+    ANDROID_EDGE_STRENGTH,
+    ANDROID_FLASH_FIRING_POWER,
+    ANDROID_FLASH_FIRING_TIME,
+    ANDROID_FLASH_COLOR_TEMPERATURE,
+    ANDROID_FLASH_MAX_ENERGY,
+    ANDROID_FLASH_INFO_CHARGE_DURATION,
+    ANDROID_JPEG_MAX_SIZE,
+    ANDROID_JPEG_SIZE,
+    ANDROID_NOISE_REDUCTION_STRENGTH,
+    ANDROID_QUIRKS_METERING_CROP_REGION,
+    ANDROID_QUIRKS_TRIGGER_AF_WITH_AUTO,
+    ANDROID_QUIRKS_USE_ZSL_FORMAT,
+    ANDROID_REQUEST_INPUT_STREAMS,
+    ANDROID_REQUEST_METADATA_MODE,
+    ANDROID_REQUEST_OUTPUT_STREAMS,
+    ANDROID_REQUEST_TYPE,
+    ANDROID_REQUEST_MAX_NUM_REPROCESS_STREAMS,
+    ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
+    ANDROID_SCALER_AVAILABLE_RAW_SIZES,
+    ANDROID_SENSOR_BASE_GAIN_FACTOR,
+    ANDROID_SENSOR_PROFILE_HUE_SAT_MAP_DIMENSIONS,
+    ANDROID_SENSOR_TEMPERATURE,
+    ANDROID_SENSOR_PROFILE_HUE_SAT_MAP,
+    ANDROID_SENSOR_PROFILE_TONE_CURVE,
+    ANDROID_SENSOR_OPAQUE_RAW_SIZE,
+    ANDROID_SHADING_STRENGTH,
+    ANDROID_STATISTICS_HISTOGRAM_MODE,
+    ANDROID_STATISTICS_SHARPNESS_MAP_MODE,
+    ANDROID_STATISTICS_HISTOGRAM,
+    ANDROID_STATISTICS_SHARPNESS_MAP,
+    ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT,
+    ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT,
+    ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE,
+    ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE,
+    ANDROID_DEPTH_MAX_DEPTH_SAMPLES,
+});
+
+/*~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~
+ * End generated code
+ *~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~@~O@*/
diff --git a/camera/ndk/impl/ACameraMetadata.h b/camera/ndk/impl/ACameraMetadata.h
new file mode 100644
index 0000000..8d050c4
--- /dev/null
+++ b/camera/ndk/impl/ACameraMetadata.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _ACAMERA_METADATA_H
+#define _ACAMERA_METADATA_H
+
+#include <unordered_set>
+
+#include <sys/types.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+#include <camera/CameraMetadata.h>
+
+#include "NdkCameraMetadata.h"
+
+using namespace android;
+
+/**
+ * ACameraMetadata opaque struct definition
+ * Left outside the android namespace because it is an NDK struct
+ */
+struct ACameraMetadata : public RefBase {
+  public:
+    typedef enum {
+        ACM_CHARACTERISTICS, // Read only
+        ACM_REQUEST,         // Read/Write
+        ACM_RESULT,          // Read only
+    } ACAMERA_METADATA_TYPE;
+
+    // Takes ownership of the passed-in buffer
+    ACameraMetadata(camera_metadata_t *buffer, ACAMERA_METADATA_TYPE type);
+    // Clone
+    ACameraMetadata(const ACameraMetadata& other) :
+            mData(other.mData), mType(other.mType) {};
+
+    camera_status_t getConstEntry(uint32_t tag, ACameraMetadata_const_entry* entry) const;
+
+    camera_status_t update(uint32_t tag, uint32_t count, const uint8_t* data);
+    camera_status_t update(uint32_t tag, uint32_t count, const int32_t* data);
+    camera_status_t update(uint32_t tag, uint32_t count, const float* data);
+    camera_status_t update(uint32_t tag, uint32_t count, const double* data);
+    camera_status_t update(uint32_t tag, uint32_t count, const int64_t* data);
+    camera_status_t update(uint32_t tag, uint32_t count, const ACameraMetadata_rational* data);
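+
+    // Note: these overloads back the NDK ACaptureRequest_setEntry_* calls
+    // (assumed wiring; see updateImpl below). Passing count == 0 with a null
+    // data pointer erases the tag instead of updating it.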
+
+    camera_status_t getTags(/*out*/int32_t* numTags,
+                            /*out*/const uint32_t** tags) const;
+
+    bool isNdkSupportedCapability(const int32_t capability);
+    static inline bool isVendorTag(const uint32_t tag);
+    static bool isCaptureRequestTag(const uint32_t tag);
+    void filterUnsupportedFeatures(); // Hide features not yet supported by NDK
+    void filterStreamConfigurations(); // Hide input streams, translate HAL formats to NDK formats
+
+    const CameraMetadata& getInternalData();
+
+    template<typename INTERNAL_T, typename NDK_T>
+    camera_status_t updateImpl(uint32_t tag, uint32_t count, const NDK_T* data) {
+        if (mType != ACM_REQUEST) {
+            ALOGE("Error: Write to metadata is only allowed for capture request!");
+            return ACAMERA_ERROR_INVALID_PARAMETER;
+        }
+        if (!isCaptureRequestTag(tag)) {
+            ALOGE("Error: tag %d is not writable!", tag);
+            return ACAMERA_ERROR_INVALID_PARAMETER;
+        }
+
+        Mutex::Autolock _l(mLock);
+
+        status_t ret = OK;
+        if (count == 0 && data == nullptr) {
+            ret = mData.erase(tag);
+        } else {
+            // Here we have to use reinterpret_cast because the NDK data type is
+            // an exact copy of the internal data type, but they do not inherit
+            // from each other
+            ret = mData.update(tag, reinterpret_cast<const INTERNAL_T*>(data), count);
+        }
+
+        if (ret == OK) {
+            mTags.clear();
+            return ACAMERA_OK;
+        } else {
+            return ACAMERA_ERROR_INVALID_PARAMETER;
+        }
+    }
+
+  private:
+    // Guards access to the public APIs: get/update/getTags
+    mutable Mutex    mLock;
+    CameraMetadata   mData;
+    mutable Vector<uint32_t> mTags; // updated in getTags, cleared by update
+    const ACAMERA_METADATA_TYPE mType;
+
+    static std::unordered_set<uint32_t> sSystemTags;
+};
+
+#endif // _ACAMERA_METADATA_H
diff --git a/camera/ndk/impl/ACaptureRequest.h b/camera/ndk/impl/ACaptureRequest.h
new file mode 100644
index 0000000..6bd8406
--- /dev/null
+++ b/camera/ndk/impl/ACaptureRequest.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _ACAPTURE_REQUEST_H
+#define _ACAPTURE_REQUEST_H
+
+#include "NdkCaptureRequest.h"
+#include <set>
+
+using namespace android;
+
+struct ACameraOutputTarget {
+    ACameraOutputTarget(ANativeWindow* window) : mWindow(window) {};
+
+    bool operator == (const ACameraOutputTarget& other) const {
+        return mWindow == other.mWindow;
+    }
+    bool operator != (const ACameraOutputTarget& other) const {
+        return mWindow != other.mWindow;
+    }
+    bool operator < (const ACameraOutputTarget& other) const {
+        return mWindow < other.mWindow;
+    }
+    bool operator > (const ACameraOutputTarget& other) const {
+        return mWindow > other.mWindow;
+    }
+
+    ANativeWindow* mWindow;
+};
+
+struct ACameraOutputTargets {
+    std::set<ACameraOutputTarget> mOutputs;
+};
+
+struct ACaptureRequest {
+    ACameraMetadata*      settings;
+    ACameraOutputTargets* targets;
+};
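+
+/*
+ * Illustrative sketch of how these structs are populated through the NDK
+ * request APIs (not part of this change; error handling omitted, and
+ * `window` and `request` are assumed to exist):
+ *
+ *   ACameraOutputTarget* target = nullptr;
+ *   ACameraOutputTarget_create(window, &target);
+ *   ACaptureRequest_addTarget(request, target);
+ */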
+
+#endif // _ACAPTURE_REQUEST_H
diff --git a/camera/tests/Android.mk b/camera/tests/Android.mk
index 3777d94..8019999 100644
--- a/camera/tests/Android.mk
+++ b/camera/tests/Android.mk
@@ -32,14 +32,11 @@
 	libbinder
 
 LOCAL_C_INCLUDES += \
-	system/media/camera/include \
 	system/media/private/camera/include \
 	system/media/camera/tests \
 	frameworks/av/services/camera/libcameraservice \
-	frameworks/av/include/camera \
-	frameworks/native/include \
 
-LOCAL_CFLAGS += -Wall -Wextra
+LOCAL_CFLAGS += -Wall -Wextra -Werror
 
 LOCAL_MODULE:= camera_client_test
 LOCAL_MODULE_TAGS := tests
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 572fb72..828a758 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -32,12 +32,15 @@
 #include <hardware/gralloc.h>
 
 #include <camera/CameraMetadata.h>
-#include <camera/ICameraService.h>
-#include <camera/ICameraServiceListener.h>
+#include <android/hardware/ICameraService.h>
+#include <android/hardware/ICameraServiceListener.h>
+#include <android/hardware/BnCameraServiceListener.h>
+#include <android/hardware/camera2/ICameraDeviceUser.h>
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
+#include <android/hardware/camera2/BnCameraDeviceCallbacks.h>
 #include <camera/camera2/CaptureRequest.h>
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
 #include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/SubmitInfo.h>
 
 #include <gui/BufferItemConsumer.h>
 #include <gui/IGraphicBufferProducer.h>
@@ -60,25 +63,27 @@
 #define IDLE_TIMEOUT 2000000000 // ns
 
 // Stub listener implementation
-class TestCameraServiceListener : public BnCameraServiceListener {
-    std::map<String16, TorchStatus> mCameraTorchStatuses;
-    std::map<int32_t, Status> mCameraStatuses;
+class TestCameraServiceListener : public hardware::BnCameraServiceListener {
+    std::map<String16, int32_t> mCameraTorchStatuses;
+    std::map<int32_t, int32_t> mCameraStatuses;
     mutable Mutex mLock;
     mutable Condition mCondition;
     mutable Condition mTorchCondition;
 public:
     virtual ~TestCameraServiceListener() {};
 
-    virtual void onStatusChanged(Status status, int32_t cameraId) {
+    virtual binder::Status onStatusChanged(int32_t status, int32_t cameraId) {
         Mutex::Autolock l(mLock);
         mCameraStatuses[cameraId] = status;
         mCondition.broadcast();
+        return binder::Status::ok();
     };
 
-    virtual void onTorchStatusChanged(TorchStatus status, const String16& cameraId) {
+    virtual binder::Status onTorchStatusChanged(int32_t status, const String16& cameraId) {
         Mutex::Autolock l(mLock);
         mCameraTorchStatuses[cameraId] = status;
         mTorchCondition.broadcast();
+        return binder::Status::ok();
     };
 
     bool waitForNumCameras(size_t num) const {
@@ -96,7 +101,7 @@
         return true;
     };
 
-    bool waitForTorchState(TorchStatus status, int32_t cameraId) const {
+    bool waitForTorchState(int32_t status, int32_t cameraId) const {
         Mutex::Autolock l(mLock);
 
         const auto& iter = mCameraTorchStatuses.find(String16(String8::format("%d", cameraId)));
@@ -116,27 +121,27 @@
         return true;
     };
 
-    TorchStatus getTorchStatus(int32_t cameraId) const {
+    int32_t getTorchStatus(int32_t cameraId) const {
         Mutex::Autolock l(mLock);
         const auto& iter = mCameraTorchStatuses.find(String16(String8::format("%d", cameraId)));
         if (iter == mCameraTorchStatuses.end()) {
-            return ICameraServiceListener::TORCH_STATUS_UNKNOWN;
+            return hardware::ICameraServiceListener::TORCH_STATUS_UNKNOWN;
         }
         return iter->second;
     };
 
-    Status getStatus(int32_t cameraId) const {
+    int32_t getStatus(int32_t cameraId) const {
         Mutex::Autolock l(mLock);
         const auto& iter = mCameraStatuses.find(cameraId);
         if (iter == mCameraStatuses.end()) {
-            return ICameraServiceListener::STATUS_UNKNOWN;
+            return hardware::ICameraServiceListener::STATUS_UNKNOWN;
         }
         return iter->second;
     };
 };
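
With the switch to the AIDL-generated BnCameraServiceListener, every callback returns binder::Status instead of void, and the old Status/TorchStatus enums arrive as plain int32_t values drawn from the ICameraServiceListener constants. A minimal listener following the same pattern as the test class above (class and member names are illustrative):

    class MyListener : public android::hardware::BnCameraServiceListener {
    public:
        virtual android::binder::Status onStatusChanged(
                int32_t status, int32_t cameraId) override {
            // status is one of the ICameraServiceListener STATUS_* constants.
            (void) status; (void) cameraId;
            return android::binder::Status::ok();
        }

        virtual android::binder::Status onTorchStatusChanged(
                int32_t status, const android::String16& cameraId) override {
            (void) status; (void) cameraId;
            return android::binder::Status::ok();
        }
    };
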
 
 // Callback implementation
-class TestCameraDeviceCallbacks : public BnCameraDeviceCallbacks {
+class TestCameraDeviceCallbacks : public hardware::camera2::BnCameraDeviceCallbacks {
 public:
     enum Status {
         IDLE,
@@ -144,13 +149,14 @@
         PREPARED,
         RUNNING,
         SENT_RESULT,
-        UNINITIALIZED
+        UNINITIALIZED,
+        REPEATING_REQUEST_ERROR,
     };
 
 protected:
     bool mError;
-    Status mLastStatus;
-    mutable std::vector<Status> mStatusesHit;
+    int32_t mLastStatus;
+    mutable std::vector<int32_t> mStatusesHit;
     mutable Mutex mLock;
     mutable Condition mStatusCondition;
 public:
@@ -158,45 +164,65 @@
 
     virtual ~TestCameraDeviceCallbacks() {}
 
-    virtual void onDeviceError(CameraErrorCode errorCode,
+    virtual binder::Status onDeviceError(int errorCode,
             const CaptureResultExtras& resultExtras) {
+        (void) resultExtras;
         ALOGE("%s: onDeviceError occurred with: %d", __FUNCTION__, static_cast<int>(errorCode));
         Mutex::Autolock l(mLock);
         mError = true;
         mLastStatus = ERROR;
         mStatusesHit.push_back(mLastStatus);
         mStatusCondition.broadcast();
+        return binder::Status::ok();
     }
 
-    virtual void onDeviceIdle() {
+    virtual binder::Status onDeviceIdle() {
         Mutex::Autolock l(mLock);
         mLastStatus = IDLE;
         mStatusesHit.push_back(mLastStatus);
         mStatusCondition.broadcast();
+        return binder::Status::ok();
     }
 
-    virtual void onCaptureStarted(const CaptureResultExtras& resultExtras,
+    virtual binder::Status onCaptureStarted(const CaptureResultExtras& resultExtras,
             int64_t timestamp) {
+        (void) resultExtras;
+        (void) timestamp;
         Mutex::Autolock l(mLock);
         mLastStatus = RUNNING;
         mStatusesHit.push_back(mLastStatus);
         mStatusCondition.broadcast();
+        return binder::Status::ok();
     }
 
 
-    virtual void onResultReceived(const CameraMetadata& metadata,
+    virtual binder::Status onResultReceived(const CameraMetadata& metadata,
             const CaptureResultExtras& resultExtras) {
+        (void) metadata;
+        (void) resultExtras;
         Mutex::Autolock l(mLock);
         mLastStatus = SENT_RESULT;
         mStatusesHit.push_back(mLastStatus);
         mStatusCondition.broadcast();
+        return binder::Status::ok();
     }
 
-    virtual void onPrepared(int streamId) {
+    virtual binder::Status onPrepared(int streamId) {
+        (void) streamId;
         Mutex::Autolock l(mLock);
         mLastStatus = PREPARED;
         mStatusesHit.push_back(mLastStatus);
         mStatusCondition.broadcast();
+        return binder::Status::ok();
+    }
+
+    virtual binder::Status onRepeatingRequestError(int64_t lastFrameNumber) {
+        (void) lastFrameNumber;
+        Mutex::Autolock l(mLock);
+        mLastStatus = REPEATING_REQUEST_ERROR;
+        mStatusesHit.push_back(mLastStatus);
+        mStatusCondition.broadcast();
+        return binder::Status::ok();
     }
 
     // Test helper functions:
@@ -237,95 +263,132 @@
 
 };
 
+namespace {
+    Mutex                     gLock;
+    class DeathNotifier : public IBinder::DeathRecipient
+    {
+    public:
+        DeathNotifier() {}
+
+        virtual void binderDied(const wp<IBinder>& /*who*/) {
+            ALOGV("binderDied");
+            Mutex::Autolock _l(gLock);
+            ALOGW("Camera service died!");
+        }
+    };
+    sp<DeathNotifier>         gDeathNotifier;
+}; // anonymous namespace
+
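
The death notifier added above is the stock libbinder pattern: a strong IBinder::DeathRecipient is registered against the remote service binder, and binderDied() fires if the camera service process goes away, which keeps a hung test from silently waiting on a dead service. The registration half, as the test performs it below (sketch):

    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.camera"));
    sp<DeathNotifier> notifier = new DeathNotifier();
    // The recipient must stay strongly referenced (here via a global sp<>),
    // or the death notification is dropped when it is destroyed.
    binder->linkToDeath(notifier);
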
 // Exercise basic binder calls for the camera service
 TEST(CameraServiceBinderTest, CheckBinderCameraService) {
     ProcessState::self()->startThreadPool();
     sp<IServiceManager> sm = defaultServiceManager();
     sp<IBinder> binder = sm->getService(String16("media.camera"));
     ASSERT_NOT_NULL(binder);
-    sp<ICameraService> service = interface_cast<ICameraService>(binder);
+    if (gDeathNotifier == NULL) {
+        gDeathNotifier = new DeathNotifier();
+    }
+    binder->linkToDeath(gDeathNotifier);
+    sp<hardware::ICameraService> service =
+            interface_cast<hardware::ICameraService>(binder);
 
+    binder::Status res;
 
-    int32_t numCameras = service->getNumberOfCameras();
+    int32_t numCameras = 0;
+    res = service->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_ALL, &numCameras);
+    EXPECT_TRUE(res.isOk()) << res;
     EXPECT_LE(0, numCameras);
 
     // Check listener binder calls
     sp<TestCameraServiceListener> listener(new TestCameraServiceListener());
-    EXPECT_EQ(OK, service->addListener(listener));
+    res = service->addListener(listener);
+    EXPECT_TRUE(res.isOk()) << res;
 
     EXPECT_TRUE(listener->waitForNumCameras(numCameras));
 
     for (int32_t i = 0; i < numCameras; i++) {
+        bool isSupported = false;
+        res = service->supportsCameraApi(i,
+                hardware::ICameraService::API_VERSION_2, &isSupported);
+        EXPECT_TRUE(res.isOk()) << res;
+
         // We only care about binder calls for the Camera2 API.  Camera1 is deprecated.
-        status_t camera2Support = service->supportsCameraApi(i, ICameraService::API_VERSION_2);
-        if (camera2Support != OK) {
-            EXPECT_EQ(-EOPNOTSUPP, camera2Support);
+        if (!isSupported) {
             continue;
         }
 
         // Check metadata binder call
         CameraMetadata metadata;
-        EXPECT_EQ(OK, service->getCameraCharacteristics(i, &metadata));
+        res = service->getCameraCharacteristics(i, &metadata);
+        EXPECT_TRUE(res.isOk()) << res;
         EXPECT_FALSE(metadata.isEmpty());
 
         // Make sure we're available, or skip device tests otherwise
-        ICameraServiceListener::Status s = listener->getStatus(i);
-        EXPECT_EQ(ICameraServiceListener::STATUS_AVAILABLE, s);
-        if (s != ICameraServiceListener::STATUS_AVAILABLE) {
+        int32_t s = listener->getStatus(i);
+        EXPECT_EQ(::android::hardware::ICameraServiceListener::STATUS_PRESENT, s);
+        if (s != ::android::hardware::ICameraServiceListener::STATUS_PRESENT) {
             continue;
         }
 
         // Check connect binder calls
         sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
-        sp<ICameraDeviceUser> device;
-        EXPECT_EQ(OK, service->connectDevice(callbacks, i, String16("meeeeeeeee!"),
-                ICameraService::USE_CALLING_UID, /*out*/device));
+        sp<hardware::camera2::ICameraDeviceUser> device;
+        res = service->connectDevice(callbacks, i, String16("meeeeeeeee!"),
+                hardware::ICameraService::USE_CALLING_UID, /*out*/&device);
+        EXPECT_TRUE(res.isOk()) << res;
         ASSERT_NE(nullptr, device.get());
         device->disconnect();
         EXPECT_FALSE(callbacks->hadError());
 
-        ICameraServiceListener::TorchStatus torchStatus = listener->getTorchStatus(i);
-        if (torchStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF) {
+        int32_t torchStatus = listener->getTorchStatus(i);
+        if (torchStatus == hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF) {
             // Check torch calls
-            EXPECT_EQ(OK, service->setTorchMode(String16(String8::format("%d", i)),
-                    /*enabled*/true, callbacks));
+            res = service->setTorchMode(String16(String8::format("%d", i)),
+                    /*enabled*/true, callbacks);
+            EXPECT_TRUE(res.isOk()) << res;
             EXPECT_TRUE(listener->waitForTorchState(
-                    ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON, i));
-            EXPECT_EQ(OK, service->setTorchMode(String16(String8::format("%d", i)),
-                    /*enabled*/false, callbacks));
+                    hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON, i));
+            res = service->setTorchMode(String16(String8::format("%d", i)),
+                    /*enabled*/false, callbacks);
+            EXPECT_TRUE(res.isOk()) << res;
             EXPECT_TRUE(listener->waitForTorchState(
-                    ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF, i));
+                    hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF, i));
         }
     }
 
-    EXPECT_EQ(OK, service->removeListener(listener));
+    res = service->removeListener(listener);
+    EXPECT_TRUE(res.isOk()) << res;
 }
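
Every call on the AIDL-generated interface now yields a binder::Status rather than a bare status_t, which is why the test checks res.isOk() and streams the status into the gtest failure message. The status also carries structured detail beyond pass/fail; a sketch of the general checking idiom (the logging is illustrative):

    binder::Status res = service->addListener(listener);
    if (!res.isOk()) {
        // exceptionCode() separates transport/exception failures from
        // service-specific errors reported via serviceSpecificErrorCode().
        ALOGE("addListener failed: %s (exception %d, service error %d)",
                res.toString8().string(), res.exceptionCode(),
                res.serviceSpecificErrorCode());
    }
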
 
 // Test fixture for client focused binder tests
 class CameraClientBinderTest : public testing::Test {
 protected:
-    sp<ICameraService> service;
+    sp<hardware::ICameraService> service;
     int32_t numCameras;
-    std::vector<std::pair<sp<TestCameraDeviceCallbacks>, sp<ICameraDeviceUser>>> openDeviceList;
+    std::vector<std::pair<sp<TestCameraDeviceCallbacks>, sp<hardware::camera2::ICameraDeviceUser>>>
+            openDeviceList;
     sp<TestCameraServiceListener> serviceListener;
 
-    std::pair<sp<TestCameraDeviceCallbacks>, sp<ICameraDeviceUser>> openNewDevice(int deviceId) {
-
+    std::pair<sp<TestCameraDeviceCallbacks>, sp<hardware::camera2::ICameraDeviceUser>>
+            openNewDevice(int deviceId) {
         sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
-        sp<ICameraDeviceUser> device;
+        sp<hardware::camera2::ICameraDeviceUser> device;
         {
             SCOPED_TRACE("openNewDevice");
-            EXPECT_EQ(OK, service->connectDevice(callbacks, deviceId, String16("meeeeeeeee!"),
-                    ICameraService::USE_CALLING_UID, /*out*/device));
+            binder::Status res = service->connectDevice(callbacks, deviceId, String16("meeeeeeeee!"),
+                    hardware::ICameraService::USE_CALLING_UID, /*out*/&device);
+            EXPECT_TRUE(res.isOk()) << res;
         }
         auto p = std::make_pair(callbacks, device);
         openDeviceList.push_back(p);
         return p;
     }
 
-    void closeDevice(std::pair<sp<TestCameraDeviceCallbacks>, sp<ICameraDeviceUser>>& p) {
+    void closeDevice(std::pair<sp<TestCameraDeviceCallbacks>,
+            sp<hardware::camera2::ICameraDeviceUser>>& p) {
         if (p.second.get() != nullptr) {
-            p.second->disconnect();
+            binder::Status res = p.second->disconnect();
+            EXPECT_TRUE(res.isOk()) << res;
             {
                 SCOPED_TRACE("closeDevice");
                 EXPECT_FALSE(p.first->hadError());
@@ -341,10 +404,11 @@
         ProcessState::self()->startThreadPool();
         sp<IServiceManager> sm = defaultServiceManager();
         sp<IBinder> binder = sm->getService(String16("media.camera"));
-        service = interface_cast<ICameraService>(binder);
+        service = interface_cast<hardware::ICameraService>(binder);
         serviceListener = new TestCameraServiceListener();
         service->addListener(serviceListener);
-        numCameras = service->getNumberOfCameras();
+        service->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_BACKWARD_COMPATIBLE,
+                &numCameras);
     }
 
     virtual void TearDown() {
@@ -359,19 +423,19 @@
 
 TEST_F(CameraClientBinderTest, CheckBinderCameraDeviceUser) {
     ASSERT_NOT_NULL(service);
-
     EXPECT_TRUE(serviceListener->waitForNumCameras(numCameras));
     for (int32_t i = 0; i < numCameras; i++) {
         // Make sure we're available, or skip device tests otherwise
-        ICameraServiceListener::Status s = serviceListener->getStatus(i);
-        EXPECT_EQ(ICameraServiceListener::STATUS_AVAILABLE, s);
-        if (s != ICameraServiceListener::STATUS_AVAILABLE) {
+        int32_t s = serviceListener->getStatus(i);
+        EXPECT_EQ(hardware::ICameraServiceListener::STATUS_PRESENT, s);
+        if (s != hardware::ICameraServiceListener::STATUS_PRESENT) {
             continue;
         }
+        binder::Status res;
 
         auto p = openNewDevice(i);
         sp<TestCameraDeviceCallbacks> callbacks = p.first;
-        sp<ICameraDeviceUser> device = p.second;
+        sp<hardware::camera2::ICameraDeviceUser> device = p.second;
 
         // Setup a buffer queue; I'm just using the vendor opaque format here as that is
         // guaranteed to be present
@@ -392,50 +456,65 @@
         OutputConfiguration output(gbProducer, /*rotation*/0);
 
         // Can we configure?
-        EXPECT_EQ(OK, device->beginConfigure());
-        status_t streamId = device->createStream(output);
+        res = device->beginConfigure();
+        EXPECT_TRUE(res.isOk()) << res;
+        status_t streamId;
+        res = device->createStream(output, &streamId);
+        EXPECT_TRUE(res.isOk()) << res;
         EXPECT_LE(0, streamId);
-        EXPECT_EQ(OK, device->endConfigure());
+        res = device->endConfigure(/*isConstrainedHighSpeed*/ false);
+        EXPECT_TRUE(res.isOk()) << res;
         EXPECT_FALSE(callbacks->hadError());
 
         // Can we make requests?
         CameraMetadata requestTemplate;
-        EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
-                /*out*/&requestTemplate));
-        sp<CaptureRequest> request(new CaptureRequest());
-        request->mMetadata = requestTemplate;
-        request->mSurfaceList.add(surface);
-        request->mIsReprocess = false;
+        res = device->createDefaultRequest(/*preview template*/1,
+                /*out*/&requestTemplate);
+        EXPECT_TRUE(res.isOk()) << res;
+
+        hardware::camera2::CaptureRequest request;
+        request.mMetadata = requestTemplate;
+        request.mSurfaceList.add(surface);
+        request.mIsReprocess = false;
         int64_t lastFrameNumber = 0;
         int64_t lastFrameNumberPrev = 0;
         callbacks->clearStatus();
-        int requestId = device->submitRequest(request, /*streaming*/true, /*out*/&lastFrameNumber);
+
+        hardware::camera2::utils::SubmitInfo info;
+        res = device->submitRequest(request, /*streaming*/true, /*out*/&info);
+        EXPECT_TRUE(res.isOk()) << res;
         EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
-        EXPECT_LE(0, requestId);
+        EXPECT_LE(0, info.mRequestId);
 
         // Can we stop requests?
-        EXPECT_EQ(OK, device->cancelRequest(requestId, /*out*/&lastFrameNumber));
+        res = device->cancelRequest(info.mRequestId, /*out*/&lastFrameNumber);
+        EXPECT_TRUE(res.isOk()) << res;
         EXPECT_TRUE(callbacks->waitForIdle());
         EXPECT_FALSE(callbacks->hadError());
 
         // Can we do it again?
-        lastFrameNumberPrev = lastFrameNumber;
+        lastFrameNumberPrev = info.mLastFrameNumber;
         lastFrameNumber = 0;
         requestTemplate.clear();
-        EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
-                /*out*/&requestTemplate));
-        sp<CaptureRequest> request2(new CaptureRequest());
-        request2->mMetadata = requestTemplate;
-        request2->mSurfaceList.add(surface);
-        request2->mIsReprocess = false;
+        res = device->createDefaultRequest(hardware::camera2::ICameraDeviceUser::TEMPLATE_PREVIEW,
+                /*out*/&requestTemplate);
+        EXPECT_TRUE(res.isOk()) << res;
+        hardware::camera2::CaptureRequest request2;
+        request2.mMetadata = requestTemplate;
+        request2.mSurfaceList.add(surface);
+        request2.mIsReprocess = false;
         callbacks->clearStatus();
-        int requestId2 = device->submitRequest(request2, /*streaming*/true,
-                /*out*/&lastFrameNumber);
-        EXPECT_EQ(-1, lastFrameNumber);
+        hardware::camera2::utils::SubmitInfo info2;
+        res = device->submitRequest(request2, /*streaming*/true,
+                /*out*/&info2);
+        EXPECT_TRUE(res.isOk()) << res;
+        EXPECT_EQ(hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES,
+                info2.mLastFrameNumber);
         lastFrameNumber = 0;
         EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
-        EXPECT_LE(0, requestId2);
-        EXPECT_EQ(OK, device->cancelRequest(requestId2, /*out*/&lastFrameNumber));
+        EXPECT_LE(0, info2.mRequestId);
+        res = device->cancelRequest(info2.mRequestId, /*out*/&lastFrameNumber);
+        EXPECT_TRUE(res.isOk()) << res;
         EXPECT_TRUE(callbacks->waitForIdle());
         EXPECT_LE(lastFrameNumberPrev, lastFrameNumber);
         sleep(/*second*/1); // allow some time for errors to show up, if any
@@ -446,35 +525,44 @@
         lastFrameNumber = 0;
         requestTemplate.clear();
         CameraMetadata requestTemplate2;
-        EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
-                /*out*/&requestTemplate));
-        EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
-                /*out*/&requestTemplate2));
-        sp<CaptureRequest> request3(new CaptureRequest());
-        sp<CaptureRequest> request4(new CaptureRequest());
-        request3->mMetadata = requestTemplate;
-        request3->mSurfaceList.add(surface);
-        request3->mIsReprocess = false;
-        request4->mMetadata = requestTemplate2;
-        request4->mSurfaceList.add(surface);
-        request4->mIsReprocess = false;
-        List<sp<CaptureRequest>> requestList;
+        res = device->createDefaultRequest(hardware::camera2::ICameraDeviceUser::TEMPLATE_PREVIEW,
+                /*out*/&requestTemplate);
+        EXPECT_TRUE(res.isOk()) << res;
+        res = device->createDefaultRequest(hardware::camera2::ICameraDeviceUser::TEMPLATE_PREVIEW,
+                /*out*/&requestTemplate2);
+        EXPECT_TRUE(res.isOk()) << res;
+        android::hardware::camera2::CaptureRequest request3;
+        android::hardware::camera2::CaptureRequest request4;
+        request3.mMetadata = requestTemplate;
+        request3.mSurfaceList.add(surface);
+        request3.mIsReprocess = false;
+        request4.mMetadata = requestTemplate2;
+        request4.mSurfaceList.add(surface);
+        request4.mIsReprocess = false;
+        std::vector<hardware::camera2::CaptureRequest> requestList;
         requestList.push_back(request3);
         requestList.push_back(request4);
 
         callbacks->clearStatus();
-        int requestId3 = device->submitRequestList(requestList, /*streaming*/false,
-                /*out*/&lastFrameNumber);
+        hardware::camera2::utils::SubmitInfo info3;
+        res = device->submitRequestList(requestList, /*streaming*/false,
+                /*out*/&info3);
+        EXPECT_TRUE(res.isOk()) << res;
+        EXPECT_LE(0, info3.mRequestId);
         EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
         EXPECT_TRUE(callbacks->waitForIdle());
-        EXPECT_LE(lastFrameNumberPrev, lastFrameNumber);
+        EXPECT_LE(lastFrameNumberPrev, info3.mLastFrameNumber);
         sleep(/*second*/1); // allow some time for errors to show up, if any
         EXPECT_FALSE(callbacks->hadError());
 
         // Can we unconfigure?
-        EXPECT_EQ(OK, device->beginConfigure());
-        EXPECT_EQ(OK, device->deleteStream(streamId));
-        EXPECT_EQ(OK, device->endConfigure());
+        res = device->beginConfigure();
+        EXPECT_TRUE(res.isOk()) << res;
+        res = device->deleteStream(streamId);
+        EXPECT_TRUE(res.isOk()) << res;
+        res = device->endConfigure(/*isConstrainedHighSpeed*/ false);
+        EXPECT_TRUE(res.isOk()) << res;
+
         sleep(/*second*/1); // allow some time for errors to show up, if any
         EXPECT_FALSE(callbacks->hadError());
 
diff --git a/camera/tests/VendorTagDescriptorTests.cpp b/camera/tests/VendorTagDescriptorTests.cpp
index 9082dbf..75cfb73 100644
--- a/camera/tests/VendorTagDescriptorTests.cpp
+++ b/camera/tests/VendorTagDescriptorTests.cpp
@@ -53,27 +53,27 @@
 
 extern "C" {
 
-static int zero_get_tag_count(const vendor_tag_ops_t* vOps) {
+static int zero_get_tag_count(const vendor_tag_ops_t*) {
     return 0;
 }
 
-static int default_get_tag_count(const vendor_tag_ops_t* vOps) {
+static int default_get_tag_count(const vendor_tag_ops_t*) {
     return VENDOR_TAG_COUNT_ERR;
 }
 
-static void default_get_all_tags(const vendor_tag_ops_t* vOps, uint32_t* tagArray) {
+static void default_get_all_tags(const vendor_tag_ops_t*, uint32_t*) {
     //Noop
 }
 
-static const char* default_get_section_name(const vendor_tag_ops_t* vOps, uint32_t tag) {
+static const char* default_get_section_name(const vendor_tag_ops_t*, uint32_t) {
     return VENDOR_SECTION_NAME_ERR;
 }
 
-static const char* default_get_tag_name(const vendor_tag_ops_t* vOps, uint32_t tag) {
+static const char* default_get_tag_name(const vendor_tag_ops_t*, uint32_t) {
     return VENDOR_TAG_NAME_ERR;
 }
 
-static int default_get_tag_type(const vendor_tag_ops_t* vOps, uint32_t tag) {
+static int default_get_tag_type(const vendor_tag_ops_t*, uint32_t) {
     return VENDOR_TAG_TYPE_ERR;
 }
 
@@ -141,7 +141,8 @@
     // Check whether parcel read/write succeed
     EXPECT_EQ(OK, vDescOriginal->writeToParcel(&p));
     p.setDataPosition(0);
-    ASSERT_EQ(OK, VendorTagDescriptor::createFromParcel(&p, vDescParceled));
+
+    ASSERT_EQ(OK, vDescParceled->readFromParcel(&p));
 
     // Ensure consistent tag count
     int tagCount = vDescOriginal->getTagCount();
@@ -197,7 +198,6 @@
     EXPECT_EQ(VENDOR_TAG_TYPE_ERR, vDesc->getTagType(BAD_TAG));
 
     // Make sure global can be set/cleared
-    const vendor_tag_ops_t *fakeOps = &fakevendor_ops;
     sp<VendorTagDescriptor> prevGlobal = VendorTagDescriptor::getGlobalVendorTagDescriptor();
     VendorTagDescriptor::clearGlobalVendorTagDescriptor();
 
@@ -208,4 +208,3 @@
     EXPECT_EQ(OK, VendorTagDescriptor::setAsGlobalVendorTagDescriptor(prevGlobal));
     EXPECT_EQ(prevGlobal, VendorTagDescriptor::getGlobalVendorTagDescriptor());
 }
-
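
VendorTagDescriptor is now an ordinary Parcelable: instead of the static createFromParcel() factory, an already-constructed instance deserializes itself via readFromParcel(). The round trip the test exercises, in isolation (sketch; the destination descriptor is created the same way as the test's vDescParceled, which this hunk does not show):

    Parcel p;
    ASSERT_EQ(OK, vDescOriginal->writeToParcel(&p));
    p.setDataPosition(0);                              // rewind before reading
    ASSERT_EQ(OK, vDescParceled->readFromParcel(&p));  // fills the existing object
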
diff --git a/cmds/screenrecord/FrameOutput.cpp b/cmds/screenrecord/FrameOutput.cpp
index bef74f5..ee7ace6 100644
--- a/cmds/screenrecord/FrameOutput.cpp
+++ b/cmds/screenrecord/FrameOutput.cpp
@@ -74,7 +74,7 @@
                 GL_TEXTURE_EXTERNAL_OES, true, false);
     mGlConsumer->setName(String8("virtual display"));
     mGlConsumer->setDefaultBufferSize(width, height);
-    mGlConsumer->setDefaultMaxBufferCount(5);
+    producer->setMaxDequeuedBufferCount(4);
     mGlConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_TEXTURE);
 
     mGlConsumer->setFrameAvailableListener(this);
diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp
index c659170..9fd192c 100644
--- a/cmds/screenrecord/Overlay.cpp
+++ b/cmds/screenrecord/Overlay.cpp
@@ -176,7 +176,7 @@
                 GL_TEXTURE_EXTERNAL_OES, true, false);
     mGlConsumer->setName(String8("virtual display"));
     mGlConsumer->setDefaultBufferSize(width, height);
-    mGlConsumer->setDefaultMaxBufferCount(5);
+    mProducer->setMaxDequeuedBufferCount(4);
     mGlConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_TEXTURE);
 
     mGlConsumer->setFrameAvailableListener(this);
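
Both screenrecord consumers previously pinned their queues to five buffers with the consumer-side setDefaultMaxBufferCount(); after the BufferQueue API split, the producer instead declares how many buffers it may have dequeued at once, with the consumer's acquired buffer counted separately, so four dequeued plus one acquired roughly preserves the old total of five. The producer-side call in context (sketch; queue creation as in the surrounding code):

    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);

    // Producer-side limit replaces the old consumer-side default max count.
    status_t err = producer->setMaxDequeuedBufferCount(4);
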
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 20c0094..9e15a81 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -9,7 +9,7 @@
 
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright libmedia libutils libbinder libstagefright_foundation \
-        libjpeg libgui libcutils liblog
+	libjpeg libgui libcutils liblog
 
 LOCAL_C_INCLUDES:= \
 	frameworks/av/media/libstagefright \
@@ -31,15 +31,16 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=         \
-        SineSource.cpp    \
-        record.cpp
+	SineSource.cpp    \
+	record.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder libstagefright_foundation
+	libstagefright libmedia liblog libutils libbinder libstagefright_foundation
 
 LOCAL_C_INCLUDES:= \
 	frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax
+	$(TOP)/frameworks/native/include/media/openmax \
+	$(TOP)/frameworks/native/include/media/hardware
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 LOCAL_CLANG := true
@@ -55,15 +56,16 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=         \
-        SineSource.cpp    \
-        recordvideo.cpp
+	SineSource.cpp    \
+	recordvideo.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder libstagefright_foundation
+	libstagefright libmedia liblog libutils libbinder libstagefright_foundation
 
 LOCAL_C_INCLUDES:= \
 	frameworks/av/media/libstagefright \
-	$(TOP)/frameworks/native/include/media/openmax
+	$(TOP)/frameworks/native/include/media/openmax \
+	$(TOP)/frameworks/native/include/media/hardware
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 LOCAL_CLANG := true
@@ -80,11 +82,11 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=         \
-        SineSource.cpp    \
-        audioloop.cpp
+	SineSource.cpp    \
+	audioloop.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder libstagefright_foundation
+	libstagefright libmedia liblog libutils libbinder libstagefright_foundation
 
 LOCAL_C_INCLUDES:= \
 	frameworks/av/media/libstagefright \
@@ -108,7 +110,7 @@
 
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright liblog libutils libbinder libgui \
-        libstagefright_foundation libmedia libcutils
+	libstagefright_foundation libmedia libcutils
 
 LOCAL_C_INCLUDES:= \
 	frameworks/av/media/libstagefright \
@@ -128,11 +130,11 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=         \
-        sf2.cpp    \
+	sf2.cpp    \
 
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright liblog libutils libbinder libstagefright_foundation \
-        libmedia libgui libcutils libui
+	libmedia libgui libcutils libui
 
 LOCAL_C_INCLUDES:= \
 	frameworks/av/media/libstagefright \
@@ -152,12 +154,12 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=               \
-        codec.cpp               \
-        SimplePlayer.cpp        \
+	codec.cpp               \
+	SimplePlayer.cpp        \
 
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright liblog libutils libbinder libstagefright_foundation \
-        libmedia libgui libcutils libui
+	libmedia libgui libcutils libui
 
 LOCAL_C_INCLUDES:= \
 	frameworks/av/media/libstagefright \
@@ -220,11 +222,11 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=               \
-        muxer.cpp            \
+	muxer.cpp            \
 
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright liblog libutils libbinder libstagefright_foundation \
-        libmedia libgui libcutils libui libc
+	libmedia libgui libcutils libui libc
 
 LOCAL_C_INCLUDES:= \
 	frameworks/av/media/libstagefright \
diff --git a/cmds/stagefright/SineSource.cpp b/cmds/stagefright/SineSource.cpp
index 587077a..cad8caf 100644
--- a/cmds/stagefright/SineSource.cpp
+++ b/cmds/stagefright/SineSource.cpp
@@ -53,6 +53,7 @@
     meta->setInt32(kKeyChannelCount, mNumChannels);
     meta->setInt32(kKeySampleRate, mSampleRate);
     meta->setInt32(kKeyMaxInputSize, kBufferSize);
+    meta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
 
     return meta;
 }
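
SineSource now advertises its sample format explicitly instead of letting consumers assume 16-bit PCM; MediaCodec-based consumers read the encoding from the source format. A downstream reader can honor it defensively (sketch; the default mirrors the historical assumption):

    int32_t pcmEncoding = kAudioEncodingPcm16bit;   // historical default
    sp<MetaData> fmt = source->getFormat();
    if (fmt->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
        // use the advertised encoding rather than assuming 16-bit samples
    }
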
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index 6e9e6ec..ed44b4d 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -23,13 +23,14 @@
 #include <binder/ProcessState.h>
 #include <media/mediarecorder.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/AMRWriter.h>
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/AudioSource.h>
+#include <media/stagefright/MediaCodecSource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/SimpleDecodingSource.h>
 #include "SineSource.h"
 
 using namespace android;
@@ -79,8 +80,6 @@
     const int32_t kBitRate = outputWBAMR ? 16000 : 8000;
 
     android::ProcessState::self()->startThreadPool();
-    OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
     sp<MediaSource> source;
 
     if (useMic) {
@@ -95,24 +94,25 @@
         source = new SineSource(kSampleRate, channels);
     }
 
-    sp<MetaData> meta = new MetaData;
-    meta->setCString(
-            kKeyMIMEType,
+    sp<AMessage> meta = new AMessage;
+    meta->setString(
+            "mime",
             outputWBAMR ? MEDIA_MIMETYPE_AUDIO_AMR_WB
                     : MEDIA_MIMETYPE_AUDIO_AMR_NB);
 
-    meta->setInt32(kKeyChannelCount, channels);
-    meta->setInt32(kKeySampleRate, kSampleRate);
-    meta->setInt32(kKeyBitRate, kBitRate);
+    meta->setInt32("channel-count", channels);
+    meta->setInt32("sample-rate", kSampleRate);
+    meta->setInt32("bitrate", kBitRate);
     int32_t maxInputSize;
     if (source->getFormat()->findInt32(kKeyMaxInputSize, &maxInputSize)) {
-        meta->setInt32(kKeyMaxInputSize, maxInputSize);
+        meta->setInt32("max-input-size", maxInputSize);
     }
 
-    sp<MediaSource> encoder = OMXCodec::Create(
-            client.interface(),
-            meta, true /* createEncoder */,
-            source);
+    sp<ALooper> looper = new ALooper;
+    looper->setName("audioloop");
+    looper->start();
+
+    sp<IMediaSource> encoder = MediaCodecSource::Create(looper, meta, source);
 
     if (fileOut != NULL) {
         // target file specified, write encoded AMR output
@@ -128,17 +128,15 @@
         writer->stop();
     } else {
         // otherwise decode to speaker
-        sp<MediaSource> decoder = OMXCodec::Create(
-                client.interface(),
-                meta, false /* createEncoder */,
-                encoder);
+        sp<IMediaSource> decoder = SimpleDecodingSource::Create(encoder);
 
         if (playToSpeaker) {
             AudioPlayer *player = new AudioPlayer(NULL);
             player->setSource(decoder);
             player->start();
             sleep(duration);
-            source->stop(); // must stop source otherwise delete player will hang
+
+            decoder.clear(); // must clear |decoder| otherwise delete player will hang.
             delete player; // there is no player->stop()...
         } else {
             CHECK_EQ(decoder->start(), (status_t)OK);
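
This hunk shows the heart of the OMXCodec removal: encoding goes through MediaCodecSource, which takes an ALooper plus an AMessage format keyed with MediaCodec's string names ("mime", "bitrate", ...) in place of a MetaData with kKey* constants, while decoding goes through SimpleDecodingSource, which derives its configuration from the upstream source. The minimal encode-then-decode skeleton the tool now follows (values illustrative, error checks elided, source obtained as above):

    sp<ALooper> looper = new ALooper;
    looper->setName("audioloop");
    looper->start();

    sp<AMessage> format = new AMessage;
    format->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_NB);
    format->setInt32("channel-count", 1);
    format->setInt32("sample-rate", 8000);
    format->setInt32("bitrate", 8000);

    sp<IMediaSource> encoder = MediaCodecSource::Create(looper, format, source);
    sp<IMediaSource> decoder = SimpleDecodingSource::Create(encoder);
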
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index 36fa3b5..0a3bdf3 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -43,6 +43,7 @@
     fprintf(stderr, "       -h help\n");
     fprintf(stderr, "       -a use audio\n");
     fprintf(stderr, "       -v use video\n");
+    fprintf(stderr, "       -w mux into WebM container (default is MP4)\n");
     fprintf(stderr, "       -s Time in milli-seconds when the trim should start\n");
     fprintf(stderr, "       -e Time in milli-seconds when the trim should end\n");
     fprintf(stderr, "       -o output file name. Default is /sdcard/muxeroutput.mp4\n");
@@ -60,7 +61,8 @@
         bool enableTrim,
         int trimStartTimeMs,
         int trimEndTimeMs,
-        int rotationDegrees) {
+        int rotationDegrees,
+        MediaMuxer::OutputFormat container = MediaMuxer::OUTPUT_FORMAT_MPEG_4) {
     sp<NuMediaExtractor> extractor = new NuMediaExtractor;
     if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
         fprintf(stderr, "unable to instantiate extractor. %s\n", path);
@@ -80,8 +82,7 @@
         ALOGE("couldn't open file");
         return fd;
     }
-    sp<MediaMuxer> muxer = new MediaMuxer(fd,
-                                          MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+    sp<MediaMuxer> muxer = new MediaMuxer(fd, container);
     close(fd);
 
     size_t trackCount = extractor->countTracks();
@@ -237,9 +238,10 @@
     // When trimStartTimeMs and trimEndTimeMs seems valid, we turn this switch
     // to true.
     bool enableTrim = false;
+    MediaMuxer::OutputFormat container = MediaMuxer::OUTPUT_FORMAT_MPEG_4;
 
     int res;
-    while ((res = getopt(argc, argv, "h?avo:s:e:r:")) >= 0) {
+    while ((res = getopt(argc, argv, "h?avo:s:e:r:w")) >= 0) {
         switch (res) {
             case 'a':
             {
@@ -253,6 +255,12 @@
                 break;
             }
 
+            case 'w':
+            {
+                container = MediaMuxer::OUTPUT_FORMAT_WEBM;
+                break;
+            }
+
             case 'o':
             {
                 outputFileName = optarg;
@@ -318,7 +326,7 @@
     looper->start();
 
     int result = muxing(argv[0], useAudio, useVideo, outputFileName,
-                        enableTrim, trimStartTimeMs, trimEndTimeMs, rotationDegrees);
+                        enableTrim, trimStartTimeMs, trimEndTimeMs, rotationDegrees, container);
 
     looper->stop();
 
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 594c933..9aa0156 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -18,16 +18,18 @@
 
 #include <binder/ProcessState.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/CameraSource.h>
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecSource.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MPEG4Writer.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/SimpleDecodingSource.h>
 #include <media/MediaPlayerInterface.h>
 
 using namespace android;
@@ -182,9 +184,6 @@
         fprintf(stderr, "input color format must be 0 (YUV420SP) or 1 (YUV420P)\n");
         return 1;
     }
-    OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
-
     status_t err = OK;
 
 #if 0
@@ -197,8 +196,7 @@
 
     sp<MetaData> meta = source->getFormat();
 
-    sp<MediaSource> decoder = OMXCodec::Create(
-            client.interface(), meta, false /* createEncoder */, source);
+    sp<MediaSource> decoder = SimpleDecodingSource::Create(source);
 
     int width, height;
     bool success = meta->findInt32(kKeyWidth, &width);
@@ -210,22 +208,21 @@
     sp<MediaSource> decoder = new DummySource(width, height, colorFormat);
 #endif
 
-    sp<MetaData> enc_meta = new MetaData;
-    // enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
-    // enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
-    enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
-    enc_meta->setInt32(kKeyWidth, width);
-    enc_meta->setInt32(kKeyHeight, height);
-    enc_meta->setInt32(kKeySampleRate, kFramerate);
-    enc_meta->setInt32(kKeyBitRate, kVideoBitRate);
-    enc_meta->setInt32(kKeyStride, width);
-    enc_meta->setInt32(kKeySliceHeight, height);
-    enc_meta->setInt32(kKeyIFramesInterval, kIFramesIntervalSec);
-    enc_meta->setInt32(kKeyColorFormat, colorFormat);
+    sp<AMessage> enc_meta = new AMessage;
+    // enc_meta->setString("mime", MEDIA_MIMETYPE_VIDEO_H263);
+    // enc_meta->setString("mime", MEDIA_MIMETYPE_VIDEO_MPEG4);
+    enc_meta->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
+    enc_meta->setInt32("width", width);
+    enc_meta->setInt32("height", height);
+    enc_meta->setInt32("sample-rate", kFramerate);
+    enc_meta->setInt32("bitrate", kVideoBitRate);
+    // enc_meta->setInt32("stride", width);
+    // enc_meta->setInt32("slice-height", height);
+    enc_meta->setInt32("i-frame-interval", kIFramesIntervalSec);
+    enc_meta->setInt32("color-format", colorFormat);
 
     sp<MediaSource> encoder =
-        OMXCodec::Create(
-                client.interface(), enc_meta, true /* createEncoder */, decoder);
+        MediaCodecSource::Create(looper, enc_meta, decoder);
 
 #if 1
     sp<MPEG4Writer> writer = new MPEG4Writer("/sdcard/output.mp4");
@@ -260,7 +257,6 @@
 #endif
 
     printf("$\n");
-    client.disconnect();
 #endif
 
 #if 0
@@ -299,9 +295,6 @@
 int main(int /* argc */, char ** /* argv */) {
     android::ProcessState::self()->startThreadPool();
 
-    OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
-
     const int32_t kSampleRate = 22050;
     const int32_t kNumChannels = 2;
     sp<MediaSource> audioSource = new SineSource(kSampleRate, kNumChannels);
@@ -317,16 +310,20 @@
     player->stop();
 #endif
 
-    sp<MetaData> encMeta = new MetaData;
-    encMeta->setCString(kKeyMIMEType,
+    sp<AMessage> encMeta = new AMessage;
+    encMeta->setString("mime",
             0 ? MEDIA_MIMETYPE_AUDIO_AMR_WB : MEDIA_MIMETYPE_AUDIO_AAC);
-    encMeta->setInt32(kKeySampleRate, kSampleRate);
-    encMeta->setInt32(kKeyChannelCount, kNumChannels);
-    encMeta->setInt32(kKeyMaxInputSize, 8192);
-    encMeta->setInt32(kKeyBitRate, kAudioBitRate);
+    encMeta->setInt32("sample-rate", kSampleRate);
+    encMeta->setInt32("channel-count", kNumChannels);
+    encMeta->setInt32("max-input-size", 8192);
+    encMeta->setInt32("bitrate", kAudioBitRate);
 
-    sp<MediaSource> encoder =
-        OMXCodec::Create(client.interface(), encMeta, true, audioSource);
+    sp<ALooper> looper = new ALooper;
+    looper->setName("record");
+    looper->start();
+
+    sp<IMediaSource> encoder =
+        MediaCodecSource::Create(looper, encMeta, audioSource);
 
     encoder->start();
 
@@ -348,8 +345,6 @@
 
     encoder->stop();
 
-    client.disconnect();
-
     return 0;
 }
 #endif
diff --git a/cmds/stagefright/recordvideo.cpp b/cmds/stagefright/recordvideo.cpp
index 2ad40bd..7a3c842 100644
--- a/cmds/stagefright/recordvideo.cpp
+++ b/cmds/stagefright/recordvideo.cpp
@@ -23,15 +23,18 @@
 
 #include <binder/ProcessState.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaCodecSource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MPEG4Writer.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
 #include <media/MediaPlayerInterface.h>
 
+#include <OMX_Video.h>
+
 using namespace android;
 
 // Print usage showing how to use this utility to record videos
@@ -265,44 +268,45 @@
         }
     }
 
-    OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
-
     status_t err = OK;
     sp<MediaSource> source =
         new DummySource(width, height, nFrames, frameRateFps, colorFormat);
 
-    sp<MetaData> enc_meta = new MetaData;
+    sp<AMessage> enc_meta = new AMessage;
     switch (codec) {
         case 1:
-            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
+            enc_meta->setString("mime", MEDIA_MIMETYPE_VIDEO_MPEG4);
             break;
         case 2:
-            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
+            enc_meta->setString("mime", MEDIA_MIMETYPE_VIDEO_H263);
             break;
         default:
-            enc_meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+            enc_meta->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
             break;
     }
-    enc_meta->setInt32(kKeyWidth, width);
-    enc_meta->setInt32(kKeyHeight, height);
-    enc_meta->setInt32(kKeyFrameRate, frameRateFps);
-    enc_meta->setInt32(kKeyBitRate, bitRateBps);
-    enc_meta->setInt32(kKeyStride, width);
-    enc_meta->setInt32(kKeySliceHeight, height);
-    enc_meta->setInt32(kKeyIFramesInterval, iFramesIntervalSeconds);
-    enc_meta->setInt32(kKeyColorFormat, colorFormat);
+    enc_meta->setInt32("width", width);
+    enc_meta->setInt32("height", height);
+    enc_meta->setInt32("frame-rate", frameRateFps);
+    enc_meta->setInt32("bitrate", bitRateBps);
+    enc_meta->setInt32("stride", width);
+    enc_meta->setInt32("slice-height", height);
+    enc_meta->setInt32("i-frame-interval", iFramesIntervalSeconds);
+    enc_meta->setInt32("color-format", colorFormat);
     if (level != -1) {
-        enc_meta->setInt32(kKeyVideoLevel, level);
+        enc_meta->setInt32("level", level);
     }
     if (profile != -1) {
-        enc_meta->setInt32(kKeyVideoProfile, profile);
+        enc_meta->setInt32("profile", profile);
     }
 
-    sp<MediaSource> encoder =
-        OMXCodec::Create(
-                client.interface(), enc_meta, true /* createEncoder */, source,
-                0, preferSoftwareCodec ? OMXCodec::kPreferSoftwareCodecs : 0);
+    sp<ALooper> looper = new ALooper;
+    looper->setName("recordvideo");
+    looper->start();
+
+    sp<IMediaSource> encoder =
+        MediaCodecSource::Create(
+                looper, enc_meta, source, NULL /* consumer */,
+                preferSoftwareCodec ? MediaCodecSource::FLAG_PREFER_SOFTWARE_CODEC : 0);
 
     int fd = open(fileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
     if (fd < 0) {
@@ -321,7 +325,6 @@
     int64_t end = systemTime();
 
     fprintf(stderr, "$\n");
-    client.disconnect();
 
     if (err != OK && err != ERROR_END_OF_STREAM) {
         fprintf(stderr, "record failed: %d\n", err);
diff --git a/cmds/stagefright/sf2.cpp b/cmds/stagefright/sf2.cpp
index 0d64d2f..1a4bf08 100644
--- a/cmds/stagefright/sf2.cpp
+++ b/cmds/stagefright/sf2.cpp
@@ -118,7 +118,7 @@
                     DataSource::CreateFromURI(
                             NULL /* httpService */, mURI.c_str());
 
-                sp<MediaExtractor> extractor =
+                sp<IMediaExtractor> extractor =
                     MediaExtractor::Create(dataSource);
 
                 for (size_t i = 0; i < extractor->countTracks(); ++i) {
@@ -264,7 +264,7 @@
     sp<Surface> mSurface;
     bool mRenderToSurface;
     sp<ACodec> mCodec;
-    sp<MediaSource> mSource;
+    sp<IMediaSource> mSource;
     bool mIsVorbis;
 
     Vector<sp<ABuffer> > mCSD;
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index a9c6eda..ca68722 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -31,20 +31,27 @@
 
 #include <binder/IServiceManager.h>
 #include <binder/ProcessState.h>
+#include <media/ICrypto.h>
 #include <media/IMediaHTTPService.h>
+#include <media/IMediaCodecService.h>
 #include <media/IMediaPlayerService.h>
+#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
 #include "include/NuCachedSource2.h"
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/JPEGSource.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/SimpleDecodingSource.h>
+#include <media/stagefright/Utils.h>
 #include <media/mediametadataretriever.h>
 
 #include <media/stagefright/foundation/hexdump.h>
@@ -67,6 +74,7 @@
 static bool gPlaybackAudio;
 static bool gWriteMP4;
 static bool gDisplayHistogram;
+static bool showProgress = true;
 static String8 gWriteMP4Filename;
 
 static sp<ANativeWindow> gSurface;
@@ -130,7 +138,7 @@
     }
 }
 
-static void dumpSource(const sp<MediaSource> &source, const String8 &filename) {
+static void dumpSource(const sp<IMediaSource> &source, const String8 &filename) {
     FILE *out = fopen(filename.string(), "wb");
 
     CHECK_EQ((status_t)OK, source->start());
@@ -163,32 +171,26 @@
     out = NULL;
 }
 
-static void playSource(OMXClient *client, sp<MediaSource> &source) {
+static void playSource(sp<IMediaSource> &source) {
     sp<MetaData> meta = source->getFormat();
 
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
 
-    sp<MediaSource> rawSource;
+    sp<IMediaSource> rawSource;
     if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime)) {
         rawSource = source;
     } else {
         int flags = 0;
         if (gPreferSoftwareCodec) {
-            flags |= OMXCodec::kPreferSoftwareCodecs;
+            flags |= MediaCodecList::kPreferSoftwareCodecs;
         }
         if (gForceToUseHardwareCodec) {
             CHECK(!gPreferSoftwareCodec);
-            flags |= OMXCodec::kHardwareCodecsOnly;
+            flags |= MediaCodecList::kHardwareCodecsOnly;
         }
-        rawSource = OMXCodec::Create(
-            client->interface(), meta, false /* createEncoder */, source,
-            NULL /* matchComponentName */,
-            flags,
-            gSurface);
-
+        rawSource = SimpleDecodingSource::Create(source, flags, gSurface);
         if (rawSource == NULL) {
-            fprintf(stderr, "Failed to instantiate decoder for '%s'.\n", mime);
             return;
         }
         displayAVCProfileLevelIfPossible(meta);
@@ -338,7 +340,7 @@
                     decodeTimesUs.push(delayDecodeUs);
                 }
 
-                if ((n++ % 16) == 0) {
+                if (showProgress && (n++ % 16) == 0) {
                     printf(".");
                     fflush(stdout);
                 }
@@ -364,8 +366,10 @@
             }
         }
 
-        printf("$");
-        fflush(stdout);
+        if (showProgress) {
+            printf("$");
+            fflush(stdout);
+        }
 
         options.setSeekTo(0);
     }
@@ -397,7 +401,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 struct DetectSyncSource : public MediaSource {
-    DetectSyncSource(const sp<MediaSource> &source);
+    DetectSyncSource(const sp<IMediaSource> &source);
 
     virtual status_t start(MetaData *params = NULL);
     virtual status_t stop();
@@ -414,14 +418,14 @@
         OTHER,
     };
 
-    sp<MediaSource> mSource;
+    sp<IMediaSource> mSource;
     StreamType mStreamType;
     bool mSawFirstIDRFrame;
 
     DISALLOW_EVIL_CONSTRUCTORS(DetectSyncSource);
 };
 
-DetectSyncSource::DetectSyncSource(const sp<MediaSource> &source)
+DetectSyncSource::DetectSyncSource(const sp<IMediaSource> &source)
     : mSource(source),
       mStreamType(OTHER),
       mSawFirstIDRFrame(false) {
@@ -503,7 +507,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 static void writeSourcesToMP4(
-        Vector<sp<MediaSource> > &sources, bool syncInfoPresent) {
+        Vector<sp<IMediaSource> > &sources, bool syncInfoPresent) {
 #if 0
     sp<MPEG4Writer> writer =
         new MPEG4Writer(gWriteMP4Filename.string());
@@ -521,7 +525,7 @@
     writer->setMaxFileDuration(60000000ll);
 
     for (size_t i = 0; i < sources.size(); ++i) {
-        sp<MediaSource> source = sources.editItemAt(i);
+        sp<IMediaSource> source = sources.editItemAt(i);
 
         CHECK_EQ(writer->addSource(
                     syncInfoPresent ? source : new DetectSyncSource(source)),
@@ -538,7 +542,7 @@
     writer->stop();
 }
 
-static void performSeekTest(const sp<MediaSource> &source) {
+static void performSeekTest(const sp<IMediaSource> &source) {
     CHECK_EQ((status_t)OK, source->start());
 
     int64_t durationUs;
@@ -612,49 +616,57 @@
     fprintf(stderr, "       -k seek test\n");
     fprintf(stderr, "       -x display a histogram of decoding times/fps "
                     "(video only)\n");
+    fprintf(stderr, "       -q don't show progress indicator\n");
     fprintf(stderr, "       -S allocate buffers from a surface\n");
     fprintf(stderr, "       -T allocate buffers from a surface texture\n");
     fprintf(stderr, "       -d(ump) output_filename (raw stream data to a file)\n");
     fprintf(stderr, "       -D(ump) output_filename (decoded PCM data to a file)\n");
 }
 
-static void dumpCodecProfiles(const sp<IOMX>& omx, bool queryDecoders) {
+static void dumpCodecProfiles(bool queryDecoders) {
     const char *kMimeTypes[] = {
         MEDIA_MIMETYPE_VIDEO_AVC, MEDIA_MIMETYPE_VIDEO_MPEG4,
         MEDIA_MIMETYPE_VIDEO_H263, MEDIA_MIMETYPE_AUDIO_AAC,
         MEDIA_MIMETYPE_AUDIO_AMR_NB, MEDIA_MIMETYPE_AUDIO_AMR_WB,
         MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
         MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
-        MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9
+        MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9,
+        MEDIA_MIMETYPE_VIDEO_DOLBY_VISION
     };
 
     const char *codecType = queryDecoders? "decoder" : "encoder";
     printf("%s profiles:\n", codecType);
 
+    sp<IMediaCodecList> list = MediaCodecList::getInstance();
+    size_t numCodecs = list->countCodecs();
+
     for (size_t k = 0; k < sizeof(kMimeTypes) / sizeof(kMimeTypes[0]); ++k) {
         printf("type '%s':\n", kMimeTypes[k]);
 
-        Vector<CodecCapabilities> results;
-        // will retrieve hardware and software codecs
-        CHECK_EQ(QueryCodecs(omx, kMimeTypes[k],
-                             queryDecoders,
-                             &results), (status_t)OK);
-
-        for (size_t i = 0; i < results.size(); ++i) {
+        for (size_t index = 0; index < numCodecs; ++index) {
+            sp<MediaCodecInfo> info = list->getCodecInfo(index);
+            if (info == NULL || info->isEncoder() != !queryDecoders) {
+                continue;
+            }
+            sp<MediaCodecInfo::Capabilities> caps = info->getCapabilitiesFor(kMimeTypes[k]);
+            if (caps == NULL) {
+                continue;
+            }
             printf("  %s '%s' supports ",
-                       codecType, results[i].mComponentName.string());
+                       codecType, info->getCodecName());
 
-            if (results[i].mProfileLevels.size() == 0) {
-                    printf("NOTHING.\n");
-                    continue;
+            Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+            caps->getSupportedProfileLevels(&profileLevels);
+            if (profileLevels.size() == 0) {
+                printf("NOTHING.\n");
+                continue;
             }
 
-            for (size_t j = 0; j < results[i].mProfileLevels.size(); ++j) {
-                const CodecProfileLevel &profileLevel =
-                     results[i].mProfileLevels[j];
+            for (size_t j = 0; j < profileLevels.size(); ++j) {
+                const MediaCodecInfo::ProfileLevel &profileLevel = profileLevels[j];
 
-                printf("%s%" PRIu32 "/%" PRIu32, j > 0 ? ", " : "",
-                    profileLevel.mProfile, profileLevel.mLevel);
+                printf("%s%u/%u", j > 0 ? ", " : "",
+                        profileLevel.mProfile, profileLevel.mLevel);
             }
 
             printf("\n");
@@ -687,7 +699,7 @@
     sp<ALooper> looper;
 
     int res;
-    while ((res = getopt(argc, argv, "han:lm:b:ptsrow:kxSTd:D:")) >= 0) {
+    while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kxSTd:D:")) >= 0) {
         switch (res) {
             case 'a':
             {
@@ -695,6 +707,12 @@
                 break;
             }
 
+            case 'q':
+            {
+                showProgress = false;
+                break;
+            }
+
             case 'd':
             {
                 dumpStream = true;
@@ -881,23 +899,14 @@
     }
 
     if (dumpProfiles) {
-        sp<IServiceManager> sm = defaultServiceManager();
-        sp<IBinder> binder = sm->getService(String16("media.player"));
-        sp<IMediaPlayerService> service =
-            interface_cast<IMediaPlayerService>(binder);
-
-        CHECK(service.get() != NULL);
-
-        sp<IOMX> omx = service->getOMX();
-        CHECK(omx.get() != NULL);
-        dumpCodecProfiles(omx, true /* queryDecoders */);
-        dumpCodecProfiles(omx, false /* queryDecoders */);
+        dumpCodecProfiles(true /* queryDecoders */);
+        dumpCodecProfiles(false /* queryDecoders */);
     }
 
     if (listComponents) {
         sp<IServiceManager> sm = defaultServiceManager();
-        sp<IBinder> binder = sm->getService(String16("media.player"));
-        sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
+        sp<IBinder> binder = sm->getService(String16("media.codec"));
+        sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
 
         CHECK(service.get() != NULL);
 
@@ -954,16 +963,11 @@
                     false /* isControlledByApp */);
             gSurface = new Surface(producer);
         }
-
-        CHECK_EQ((status_t)OK,
-                 native_window_api_connect(
-                     gSurface.get(), NATIVE_WINDOW_API_MEDIA));
     }
 
     DataSource::RegisterDefaultSniffers();
 
-    OMXClient client;
-    status_t err = client.connect();
+    status_t err = OK;
 
     for (int k = 0; k < argc && err == OK; ++k) {
         bool syncInfoPresent = true;
@@ -985,8 +989,8 @@
             isJPEG = true;
         }
 
-        Vector<sp<MediaSource> > mediaSources;
-        sp<MediaSource> mediaSource;
+        Vector<sp<IMediaSource> > mediaSources;
+        sp<IMediaSource> mediaSource;
 
         if (isJPEG) {
             mediaSource = new JPEGSource(dataSource);
@@ -1005,7 +1009,7 @@
                 mediaSources.push(mediaSource);
             }
         } else {
-            sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
+            sp<IMediaExtractor> extractor = MediaExtractor::Create(dataSource);
 
             if (extractor == NULL) {
                 fprintf(stderr, "could not create extractor.\n");
@@ -1016,7 +1020,10 @@
 
             if (meta != NULL) {
                 const char *mime;
-                CHECK(meta->findCString(kKeyMIMEType, &mime));
+                if (!meta->findCString(kKeyMIMEType, &mime)) {
+                    fprintf(stderr, "extractor did not provide MIME type.\n");
+                    return -1;
+                }
 
                 if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
                     syncInfoPresent = false;
@@ -1029,7 +1036,7 @@
                 bool haveAudio = false;
                 bool haveVideo = false;
                 for (size_t i = 0; i < numTracks; ++i) {
-                    sp<MediaSource> source = extractor->getTrack(i);
+                    sp<IMediaSource> source = extractor->getTrack(i);
 
                     const char *mime;
                     CHECK(source->getFormat()->findCString(
@@ -1059,6 +1066,9 @@
                     meta = extractor->getTrackMetaData(
                             i, MediaExtractor::kIncludeExtensiveMetaData);
 
+                    if (meta == NULL) {
+                        break;
+                    }
                     const char *mime;
                     meta->findCString(kKeyMIMEType, &mime);
 
@@ -1097,31 +1107,16 @@
         } else if (dumpStream) {
             dumpSource(mediaSource, dumpStreamFilename);
         } else if (dumpPCMStream) {
-            OMXClient client;
-            CHECK_EQ(client.connect(), (status_t)OK);
-
-            sp<MediaSource> decSource =
-                OMXCodec::Create(
-                        client.interface(),
-                        mediaSource->getFormat(),
-                        false,
-                        mediaSource,
-                        0,
-                        0);
-
+            sp<IMediaSource> decSource = SimpleDecodingSource::Create(mediaSource);
             dumpSource(decSource, dumpStreamFilename);
         } else if (seekTest) {
             performSeekTest(mediaSource);
         } else {
-            playSource(&client, mediaSource);
+            playSource(mediaSource);
         }
     }
 
     if ((useSurfaceAlloc || useSurfaceTexAlloc) && !audioOnly) {
-        CHECK_EQ((status_t)OK,
-                 native_window_api_disconnect(
-                     gSurface.get(), NATIVE_WINDOW_API_MEDIA));
-
         gSurface.clear();
 
         if (useSurfaceAlloc) {
@@ -1129,7 +1124,5 @@
         }
     }
 
-    client.disconnect();
-
     return 0;
 }
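
[Editor's note] Taken together, the stagefright.cpp changes drop the manually bracketed OMXClient connect()/disconnect() pair in favor of SimpleDecodingSource::Create(), whose product owns its codec connection. A hedged sketch of that refactoring shape in plain C++; Client and DecodingSource are illustrative names, not stagefright's OMXClient/SimpleDecodingSource:

    // Hedged sketch: a factory whose product holds the connection (RAII)
    // replaces an explicit connect/disconnect pair at every call site.
    #include <cstdio>
    #include <memory>

    struct Client {
        bool connect() { printf("connect\n"); return true; }
        void disconnect() { printf("disconnect\n"); }
    };

    class DecodingSource {
    public:
        static std::unique_ptr<DecodingSource> Create() {
            std::unique_ptr<DecodingSource> src(new DecodingSource);
            if (!src->mClient.connect()) return nullptr;
            src->mConnected = true;
            return src;
        }
        ~DecodingSource() {
            if (mConnected) mClient.disconnect();  // no call site can forget this
        }
        void read() { printf("read\n"); }
    private:
        DecodingSource() = default;
        Client mClient;
        bool mConnected = false;
    };

    int main() {
        auto source = DecodingSource::Create();
        if (source != nullptr) source->read();
        return 0;
    }
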
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 1a40e53..bca3832 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -165,7 +165,7 @@
 
     CHECK(dataSource != NULL);
 
-    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
+    sp<IMediaExtractor> extractor = MediaExtractor::Create(dataSource);
     CHECK(extractor != NULL);
 
     mWriter = new MPEG2TSWriter(
diff --git a/drm/common/IDrmManagerService.cpp b/drm/common/IDrmManagerService.cpp
index f2e14b6..caae75f 100644
--- a/drm/common/IDrmManagerService.cpp
+++ b/drm/common/IDrmManagerService.cpp
@@ -303,7 +303,9 @@
         const String8 value = drmInforequest->get(key);
         if (key == String8("FileDescriptorKey")) {
             int fd = -1;
-            sscanf(value.string(), "FileDescriptor[%d]", &fd);
+            if (sscanf(value.string(), "FileDescriptor[%d]", &fd) != 1) {
+                sscanf(value.string(), "%d", &fd);
+            }
             data.writeFileDescriptor(fd);
         } else {
             data.writeString8((value == String8("")) ? String8("NULL") : value);
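
[Editor's note] The parse above now tolerates both the decorated and the bare form of the descriptor value. A self-contained C++ mirror of just the sscanf fallback logic:

    #include <cstdio>

    static int parseFd(const char* value) {
        int fd = -1;
        if (sscanf(value, "FileDescriptor[%d]", &fd) != 1) {
            sscanf(value, "%d", &fd);  // legacy callers pass the bare number
        }
        return fd;
    }

    int main() {
        printf("%d\n", parseFd("FileDescriptor[42]"));  // 42
        printf("%d\n", parseFd("17"));                  // 17
        printf("%d\n", parseFd("garbage"));             // -1 (left untouched)
        return 0;
    }
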
diff --git a/drm/drmserver/Android.mk b/drm/drmserver/Android.mk
index 48ea385..3b8bb04 100644
--- a/drm/drmserver/Android.mk
+++ b/drm/drmserver/Android.mk
@@ -36,10 +36,14 @@
     $(TOP)/frameworks/av/drm/libdrmframework/include \
     $(TOP)/frameworks/av/drm/libdrmframework/plugins/common/include
 
+LOCAL_CFLAGS += -Wall -Wextra -Werror
+
 LOCAL_MODULE:= drmserver
 
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_32_BIT_ONLY := true
 
+LOCAL_INIT_RC := drmserver.rc
+
 include $(BUILD_EXECUTABLE)
diff --git a/drm/drmserver/DrmManager.cpp b/drm/drmserver/DrmManager.cpp
index d8aeb0c..e168ba2 100644
--- a/drm/drmserver/DrmManager.cpp
+++ b/drm/drmserver/DrmManager.cpp
@@ -370,7 +370,7 @@
 }
 
 status_t DrmManager::getAllSupportInfo(
-                    int uniqueId, int* length, DrmSupportInfo** drmSupportInfoArray) {
+                    int /* uniqueId */, int* length, DrmSupportInfo** drmSupportInfoArray) {
     Mutex::Autolock _l(mLock);
     Vector<String8> plugInPathList = mPlugInManager.getPlugInIdList();
     int size = plugInPathList.size();
diff --git a/drm/drmserver/DrmManagerService.cpp b/drm/drmserver/DrmManagerService.cpp
index 857d73e..dad599b 100644
--- a/drm/drmserver/DrmManagerService.cpp
+++ b/drm/drmserver/DrmManagerService.cpp
@@ -51,8 +51,7 @@
 const char *DrmManagerService::get_perm_label(drm_perm_t perm) {
     unsigned int index = perm;
 
-    if (index < 0 ||
-            index >= (sizeof(drm_perm_labels) / sizeof(drm_perm_labels[0]))) {
+    if (index >= (sizeof(drm_perm_labels) / sizeof(drm_perm_labels[0]))) {
         ALOGE("SELinux: Failed to retrieve permission label(perm=%d).\n", perm);
         abort();
     }
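
[Editor's note] Context for the removed branch: index is unsigned, so `index < 0` is always false, and under the -Werror flags this patch adds, the tautological-compare warning becomes a build break. A compilable illustration; labels[] is a toy table, not drm_perm_labels:

    #include <cstdio>

    static const char* labels[] = {"consumeRights", "setPlaybackStatus"};

    static const char* getLabel(unsigned int index) {
        // One >= bound check covers "negative" inputs (which wrap to large
        // values) as well as genuinely out-of-range ones; `index < 0` would
        // be tautologically false and trip -Wtautological-compare.
        if (index >= sizeof(labels) / sizeof(labels[0])) {
            return "unknown";
        }
        return labels[index];
    }

    int main() {
        printf("%s\n", getLabel(1));                          // setPlaybackStatus
        printf("%s\n", getLabel(static_cast<unsigned>(-1)));  // unknown
        return 0;
    }
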
@@ -358,8 +357,12 @@
             }
         }
         if (dumpMem) {
-            dumpMemoryAddresses(fd);
+            result.append("\nDumping memory:\n");
+            std::string s = dumpMemoryAddresses(100 /* limit */);
+            result.append(s.c_str(), s.size());
         }
+#else
+        (void)args;
 #endif
     }
     write(fd, result.string(), result.size());
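
[Editor's note] The added `(void)args;` is the usual idiom for a parameter consumed in only one #ifdef branch: referencing it as a discarded expression keeps -Wunused-parameter (and thus -Werror) quiet in the other branch. A minimal sketch; ENABLE_MEMORY_DUMP is a hypothetical macro standing in for the real build-time flag:

    #include <cstdio>

    static void dump(int fd, const char* args) {
    #ifdef ENABLE_MEMORY_DUMP
        fprintf(stderr, "fd=%d args=%s\n", fd, args);
    #else
        (void)args;  // referenced as a discarded expression: no warning
        fprintf(stderr, "fd=%d\n", fd);
    #endif
    }

    int main() {
        dump(2, "--mem");
        return 0;
    }
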
diff --git a/drm/drmserver/drmserver.rc b/drm/drmserver/drmserver.rc
new file mode 100644
index 0000000..de46fb9
--- /dev/null
+++ b/drm/drmserver/drmserver.rc
@@ -0,0 +1,5 @@
+service drm /system/bin/drmserver
+    class main
+    user drm
+    group drm system inet drmrpc readproc
+    writepid /dev/cpuset/foreground/tasks
diff --git a/drm/drmserver/main_drmserver.cpp b/drm/drmserver/main_drmserver.cpp
index 434d561..8f697a4 100644
--- a/drm/drmserver/main_drmserver.cpp
+++ b/drm/drmserver/main_drmserver.cpp
@@ -26,8 +26,10 @@
 
 using namespace android;
 
-int main(int argc, char** argv)
+int main()
 {
+    signal(SIGPIPE, SIG_IGN);
+
     sp<ProcessState> proc(ProcessState::self());
     sp<IServiceManager> sm = defaultServiceManager();
     ALOGV("ServiceManager: %p", sm.get());
diff --git a/drm/libdrmframework/Android.mk b/drm/libdrmframework/Android.mk
index 33f9d3b..cafcb94 100644
--- a/drm/libdrmframework/Android.mk
+++ b/drm/libdrmframework/Android.mk
@@ -38,6 +38,7 @@
     $(TOP)/frameworks/av/drm/libdrmframework/include \
     $(TOP)/frameworks/av/include
 
+LOCAL_CFLAGS += -Werror
 
 
 LOCAL_MODULE_TAGS := optional
diff --git a/drm/libdrmframework/DrmManagerClientImpl.cpp b/drm/libdrmframework/DrmManagerClientImpl.cpp
index 9457bb6..cbd013e 100644
--- a/drm/libdrmframework/DrmManagerClientImpl.cpp
+++ b/drm/libdrmframework/DrmManagerClientImpl.cpp
@@ -350,7 +350,8 @@
     }
 }
 
-void DrmManagerClientImpl::DeathNotifier::binderDied(const wp<IBinder>& who) {
+void DrmManagerClientImpl::DeathNotifier::binderDied(
+            const wp<IBinder>& /* who */) {
     Mutex::Autolock lock(sMutex);
     DrmManagerClientImpl::sDrmManagerService.clear();
     ALOGW("DrmManager server died!");
diff --git a/drm/libdrmframework/NoOpDrmManagerClientImpl.cpp b/drm/libdrmframework/NoOpDrmManagerClientImpl.cpp
index dab583d..1172e80 100644
--- a/drm/libdrmframework/NoOpDrmManagerClientImpl.cpp
+++ b/drm/libdrmframework/NoOpDrmManagerClientImpl.cpp
@@ -18,134 +18,209 @@
 
 namespace android {
 
-void NoOpDrmManagerClientImpl::remove(int uniqueId) {
+void NoOpDrmManagerClientImpl::remove(int /* uniqueId */) {
 }
 
-void NoOpDrmManagerClientImpl::addClient(int uniqueId) {
+void NoOpDrmManagerClientImpl::addClient(int /* uniqueId */) {
 }
 
-void NoOpDrmManagerClientImpl::removeClient(int uniqueId) {
+void NoOpDrmManagerClientImpl::removeClient(
+            int /* uniqueId */) {
 }
 
 status_t NoOpDrmManagerClientImpl::setOnInfoListener(
-            int uniqueId, const sp<DrmManagerClient::OnInfoListener>& infoListener) {
+            int /* uniqueId */,
+            const sp<DrmManagerClient::OnInfoListener>& /* infoListener */) {
     return UNKNOWN_ERROR;
 }
 
-DrmConstraints* NoOpDrmManagerClientImpl::getConstraints(int uniqueId, const String8* path, const int action) {
+DrmConstraints* NoOpDrmManagerClientImpl::getConstraints(
+            int /* uniqueId */,
+            const String8* /* path */,
+            const int /* action */) {
     return NULL;
 }
 
-DrmMetadata* NoOpDrmManagerClientImpl::getMetadata(int uniqueId, const String8* path) {
+DrmMetadata* NoOpDrmManagerClientImpl::getMetadata(
+            int /* uniqueId */,
+            const String8* /* path */) {
     return NULL;
 }
 
-bool NoOpDrmManagerClientImpl::canHandle(int uniqueId, const String8& path, const String8& mimeType) {
+bool NoOpDrmManagerClientImpl::canHandle(
+            int /* uniqueId */,
+            const String8& /* path */,
+            const String8& /* mimeType */) {
     return false;
 }
 
-DrmInfoStatus* NoOpDrmManagerClientImpl::processDrmInfo(int uniqueId, const DrmInfo* drmInfo) {
+DrmInfoStatus* NoOpDrmManagerClientImpl::processDrmInfo(
+            int /* uniqueId */,
+            const DrmInfo* /* drmInfo */) {
     return NULL;
 }
 
-DrmInfo* NoOpDrmManagerClientImpl::acquireDrmInfo(int uniqueId, const DrmInfoRequest* drmInfoRequest) {
+DrmInfo* NoOpDrmManagerClientImpl::acquireDrmInfo(
+            int /* uniqueId */,
+            const DrmInfoRequest* /* drmInfoRequest */) {
     return NULL;
 }
 
-status_t NoOpDrmManagerClientImpl::saveRights(int uniqueId, const DrmRights& drmRights,
-            const String8& rightsPath, const String8& contentPath) {
+status_t NoOpDrmManagerClientImpl::saveRights(
+            int /* uniqueId */,
+            const DrmRights& /* drmRights */,
+            const String8& /* rightsPath */,
+            const String8& /* contentPath */) {
     return UNKNOWN_ERROR;
 }
 
-String8 NoOpDrmManagerClientImpl::getOriginalMimeType(int uniqueId, const String8& path, int fd) {
+String8 NoOpDrmManagerClientImpl::getOriginalMimeType(
+            int /* uniqueId */,
+            const String8& /* path */,
+            int /* fd */) {
     return String8();
 }
 
-int NoOpDrmManagerClientImpl::getDrmObjectType(int uniqueId, const String8& path, const String8& mimeType) {
+int NoOpDrmManagerClientImpl::getDrmObjectType(
+            int /* uniqueId */,
+            const String8& /* path */,
+            const String8& /* mimeType */) {
     return -1;
 }
 
-int NoOpDrmManagerClientImpl::checkRightsStatus(int uniqueId, const String8& path, int action) {
+int NoOpDrmManagerClientImpl::checkRightsStatus(
+            int /* uniqueId */,
+            const String8& /* path */,
+            int /* action */) {
     return -1;
 }
 
-status_t NoOpDrmManagerClientImpl::consumeRights(int uniqueId, sp<DecryptHandle> &decryptHandle, int action, bool reserve) {
+status_t NoOpDrmManagerClientImpl::consumeRights(
+            int /* uniqueId */,
+            sp<DecryptHandle> &/* decryptHandle */,
+            int /* action */,
+            bool /* reserve */) {
     return UNKNOWN_ERROR;
 }
 
 status_t NoOpDrmManagerClientImpl::setPlaybackStatus(
-            int uniqueId, sp<DecryptHandle> &decryptHandle, int playbackStatus, int64_t position) {
+            int /* uniqueId */,
+            sp<DecryptHandle> &/* decryptHandle */,
+            int /* playbackStatus */,
+            int64_t /* position */) {
     return UNKNOWN_ERROR;
 }
 
 bool NoOpDrmManagerClientImpl::validateAction(
-        int uniqueId, const String8& path, int action, const ActionDescription& description) {
+            int /* uniqueId */,
+            const String8& /* path */,
+            int /* action */,
+            const ActionDescription& /* description */) {
     return false;
 }
 
-status_t NoOpDrmManagerClientImpl::removeRights(int uniqueId, const String8& path) {
+status_t NoOpDrmManagerClientImpl::removeRights(
+            int /* uniqueId */,
+            const String8& /* path */) {
     return UNKNOWN_ERROR;
 }
 
-status_t NoOpDrmManagerClientImpl::removeAllRights(int uniqueId) {
+status_t NoOpDrmManagerClientImpl::removeAllRights(
+            int /* uniqueId */) {
     return UNKNOWN_ERROR;
 }
 
-int NoOpDrmManagerClientImpl::openConvertSession(int uniqueId, const String8& mimeType) {
+int NoOpDrmManagerClientImpl::openConvertSession(
+            int /* uniqueId */,
+            const String8& /* mimeType */) {
     return -1;
 }
 
-DrmConvertedStatus* NoOpDrmManagerClientImpl::convertData(int uniqueId, int convertId, const DrmBuffer* inputData) {
+DrmConvertedStatus* NoOpDrmManagerClientImpl::convertData(
+            int /* uniqueId */,
+            int /* convertId */,
+            const DrmBuffer* /* inputData */) {
     return NULL;
 }
 
-DrmConvertedStatus* NoOpDrmManagerClientImpl::closeConvertSession(int uniqueId, int convertId) {
+DrmConvertedStatus* NoOpDrmManagerClientImpl::closeConvertSession(
+            int /* uniqueId */,
+            int /* convertId */) {
     return NULL;
 }
 
-status_t NoOpDrmManagerClientImpl::getAllSupportInfo(int uniqueId, int* length, DrmSupportInfo** drmSupportInfoArray) {
+status_t NoOpDrmManagerClientImpl::getAllSupportInfo(
+            int /* uniqueId */,
+            int* /* length */,
+            DrmSupportInfo** /* drmSupportInfoArray */) {
     return UNKNOWN_ERROR;
 }
 
 sp<DecryptHandle> NoOpDrmManagerClientImpl::openDecryptSession(
-            int uniqueId, int fd, off64_t offset, off64_t length, const char* mime) {
+            int /* uniqueId */,
+            int /* fd */,
+            off64_t /* offset */,
+            off64_t /* length */,
+            const char* /* mime */) {
     return NULL;
 }
 
 sp<DecryptHandle> NoOpDrmManagerClientImpl::openDecryptSession(
-            int uniqueId, const char* uri, const char* mime) {
+            int /* uniqueId */,
+            const char* /* uri */,
+            const char* /* mime */) {
     return NULL;
 }
 
-sp<DecryptHandle> NoOpDrmManagerClientImpl::openDecryptSession(int uniqueId, const DrmBuffer& buf,
-            const String8& mimeType) {
+sp<DecryptHandle> NoOpDrmManagerClientImpl::openDecryptSession(
+            int /* uniqueId */,
+            const DrmBuffer& /* buf */,
+            const String8& /* mimeType */) {
     return NULL;
 }
 
-status_t NoOpDrmManagerClientImpl::closeDecryptSession(int uniqueId, sp<DecryptHandle> &decryptHandle) {
+status_t NoOpDrmManagerClientImpl::closeDecryptSession(
+            int /* uniqueId */,
+            sp<DecryptHandle> &/* decryptHandle */) {
     return UNKNOWN_ERROR;
 }
 
-status_t NoOpDrmManagerClientImpl::initializeDecryptUnit(int uniqueId, sp<DecryptHandle> &decryptHandle,
-            int decryptUnitId, const DrmBuffer* headerInfo) {
+status_t NoOpDrmManagerClientImpl::initializeDecryptUnit(
+            int /* uniqueId */,
+            sp<DecryptHandle> &/* decryptHandle */,
+            int /* decryptUnitId */,
+            const DrmBuffer* /* headerInfo */) {
     return UNKNOWN_ERROR;
 }
 
-status_t NoOpDrmManagerClientImpl::decrypt(int uniqueId, sp<DecryptHandle> &decryptHandle, int decryptUnitId,
-            const DrmBuffer* encBuffer, DrmBuffer** decBuffer, DrmBuffer* IV) {
+status_t NoOpDrmManagerClientImpl::decrypt(
+            int /* uniqueId */,
+            sp<DecryptHandle> &/* decryptHandle */,
+            int /* decryptUnitId */,
+            const DrmBuffer* /* encBuffer */,
+            DrmBuffer** /* decBuffer */,
+            DrmBuffer* /* IV */) {
     return UNKNOWN_ERROR;
 }
 
-status_t NoOpDrmManagerClientImpl::finalizeDecryptUnit(int uniqueId, sp<DecryptHandle> &decryptHandle, int decryptUnitId) {
+status_t NoOpDrmManagerClientImpl::finalizeDecryptUnit(
+            int /* uniqueId */,
+            sp<DecryptHandle> &/* decryptHandle */,
+            int /* decryptUnitId */) {
     return UNKNOWN_ERROR;
 }
 
-ssize_t NoOpDrmManagerClientImpl::pread(int uniqueId, sp<DecryptHandle> &decryptHandle,
-            void* buffer, ssize_t numBytes, off64_t offset) {
+ssize_t NoOpDrmManagerClientImpl::pread(
+            int /* uniqueId */,
+            sp<DecryptHandle> &/* decryptHandle */,
+            void* /* buffer */,
+            ssize_t /* numBytes */,
+            off64_t /* offset */) {
     return -1;
 }
 
-status_t NoOpDrmManagerClientImpl::notify(const DrmInfoEvent& event) {
+status_t NoOpDrmManagerClientImpl::notify(
+            const DrmInfoEvent& /* event */) {
     return UNKNOWN_ERROR;
 }
 
diff --git a/drm/libdrmframework/plugins/common/include/DrmEngineBase.h b/drm/libdrmframework/plugins/common/include/DrmEngineBase.h
index fa51c13..417107f 100644
--- a/drm/libdrmframework/plugins/common/include/DrmEngineBase.h
+++ b/drm/libdrmframework/plugins/common/include/DrmEngineBase.h
@@ -398,9 +398,9 @@
      *     DRM_ERROR_CANNOT_HANDLE for failure and DRM_NO_ERROR for success
      */
     virtual status_t onOpenDecryptSession(
-            int uniqueId, DecryptHandle* decryptHandle,
-            int fd, off64_t offset, off64_t length,
-            const char* mime) {
+            int /* uniqueId */, DecryptHandle* /* decryptHandle */,
+            int /* fd */, off64_t /* offset */, off64_t /* length */,
+            const char* /* mime */) {
 
         return DRM_ERROR_CANNOT_HANDLE;
     }
@@ -430,8 +430,8 @@
      *     DRM_ERROR_CANNOT_HANDLE for failure and DRM_NO_ERROR for success
      */
     virtual status_t onOpenDecryptSession(
-            int uniqueId, DecryptHandle* decryptHandle,
-            const char* uri, const char* mime) {
+            int /* uniqueId */, DecryptHandle* /* decryptHandle */,
+            const char* /* uri */, const char* /* mime */) {
 
         return DRM_ERROR_CANNOT_HANDLE;
     }
@@ -446,8 +446,10 @@
      * @return
      *     DRM_ERROR_CANNOT_HANDLE for failure and DRM_NO_ERROR for success
      */
-    virtual status_t onOpenDecryptSession(int uniqueId, DecryptHandle* decryptHandle,
-            const DrmBuffer& buf, const String8& mimeType) {
+    virtual status_t onOpenDecryptSession(int /* uniqueId */,
+            DecryptHandle* /* decryptHandle */,
+            const DrmBuffer& /* buf */,
+            const String8& /* mimeType */) {
         return DRM_ERROR_CANNOT_HANDLE;
     }
 
diff --git a/drm/libdrmframework/plugins/common/util/src/MimeTypeUtil.cpp b/drm/libdrmframework/plugins/common/util/src/MimeTypeUtil.cpp
index 576ed15..4bd1adb 100644
--- a/drm/libdrmframework/plugins/common/util/src/MimeTypeUtil.cpp
+++ b/drm/libdrmframework/plugins/common/util/src/MimeTypeUtil.cpp
@@ -115,7 +115,7 @@
  * replacement mimetype otherwise the original mimetype
  * is returned.
  *
- * If the mimetype is of unsupported group i.e. application/*
+ * If the mimetype is of unsupported group i.e. application / *
  * then "unsupported/drm.mimetype" will be returned.
  *
  * @param mimeType - mimetype in lower case to convert.
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
index f400732..a495616 100644
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
+++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
@@ -119,7 +119,7 @@
     return drmConstraints;
 }
 
-DrmMetadata* FwdLockEngine::onGetMetadata(int uniqueId, const String8* path) {
+DrmMetadata* FwdLockEngine::onGetMetadata(int /* uniqueId */, const String8* path) {
     DrmMetadata* drmMetadata = NULL;
 
     LOG_VERBOSE("FwdLockEngine::onGetMetadata");
@@ -132,7 +132,7 @@
     return drmMetadata;
 }
 
-android::status_t FwdLockEngine::onInitialize(int uniqueId) {
+android::status_t FwdLockEngine::onInitialize(int /* uniqueId */) {
     LOG_VERBOSE("FwdLockEngine::onInitialize");
 
     if (FwdLockGlue_InitializeKeyEncryption()) {
@@ -146,14 +146,16 @@
 }
 
 android::status_t
-FwdLockEngine::onSetOnInfoListener(int uniqueId, const IDrmEngine::OnInfoListener* infoListener) {
+FwdLockEngine::onSetOnInfoListener(
+            int /* uniqueId */,
+            const IDrmEngine::OnInfoListener* /* infoListener */) {
     // Not used
     LOG_VERBOSE("FwdLockEngine::onSetOnInfoListener");
 
     return DRM_NO_ERROR;
 }
 
-android::status_t FwdLockEngine::onTerminate(int uniqueId) {
+android::status_t FwdLockEngine::onTerminate(int /* uniqueId */) {
     LOG_VERBOSE("FwdLockEngine::onTerminate");
 
     return DRM_NO_ERROR;
@@ -207,7 +209,7 @@
     return false;
 }
 
-DrmSupportInfo* FwdLockEngine::onGetSupportInfo(int uniqueId) {
+DrmSupportInfo* FwdLockEngine::onGetSupportInfo(int /* uniqueId */) {
     DrmSupportInfo* pSupportInfo = new DrmSupportInfo();
 
     LOG_VERBOSE("FwdLockEngine::onGetSupportInfo");
@@ -222,14 +224,14 @@
     return pSupportInfo;
 }
 
-bool FwdLockEngine::onCanHandle(int uniqueId, const String8& path) {
+bool FwdLockEngine::onCanHandle(int /* uniqueId */, const String8& path) {
     bool result = false;
 
     String8 extString = path.getPathExtension();
     return IsFileSuffixSupported(extString);
 }
 
-DrmInfoStatus* FwdLockEngine::onProcessDrmInfo(int uniqueId, const DrmInfo* drmInfo) {
+DrmInfoStatus* FwdLockEngine::onProcessDrmInfo(int /* uniqueId */, const DrmInfo* /* drmInfo */) {
     DrmInfoStatus *drmInfoStatus = NULL;
 
     // Nothing to process
@@ -242,16 +244,17 @@
 }
 
 status_t FwdLockEngine::onSaveRights(
-            int uniqueId,
-            const DrmRights& drmRights,
-            const String8& rightsPath,
-            const String8& contentPath) {
+            int /* uniqueId */,
+            const DrmRights& /* drmRights */,
+            const String8& /* rightsPath */,
+            const String8& /* contentPath */) {
     // No rights to save. Return
     LOG_VERBOSE("FwdLockEngine::onSaveRights");
     return DRM_ERROR_UNKNOWN;
 }
 
-DrmInfo* FwdLockEngine::onAcquireDrmInfo(int uniqueId, const DrmInfoRequest* drmInfoRequest) {
+DrmInfo* FwdLockEngine::onAcquireDrmInfo(
+            int /* uniqueId */, const DrmInfoRequest* /* drmInfoRequest */) {
     DrmInfo* drmInfo = NULL;
 
     // Nothing to be done for Forward Lock file
@@ -290,10 +293,10 @@
     return result;
 }
 
-status_t FwdLockEngine::onConsumeRights(int uniqueId,
-                                        DecryptHandle* decryptHandle,
-                                        int action,
-                                        bool reserve) {
+status_t FwdLockEngine::onConsumeRights(int /* uniqueId */,
+                                        DecryptHandle* /* decryptHandle */,
+                                        int /* action */,
+                                        bool /* reserve */) {
     // No rights consumption
     LOG_VERBOSE("FwdLockEngine::onConsumeRights");
     return DRM_NO_ERROR;
@@ -302,14 +305,16 @@
 bool FwdLockEngine::onValidateAction(int uniqueId,
                                      const String8& path,
                                      int action,
-                                     const ActionDescription& description) {
+                                     const ActionDescription& /* description */) {
     LOG_VERBOSE("FwdLockEngine::onValidateAction");
 
     // For the forwardlock engine checkRights and ValidateAction are the same.
     return (onCheckRightsStatus(uniqueId, path, action) == RightsStatus::RIGHTS_VALID);
 }
 
-String8 FwdLockEngine::onGetOriginalMimeType(int uniqueId, const String8& path, int fd) {
+String8 FwdLockEngine::onGetOriginalMimeType(int /* uniqueId */,
+                                             const String8& /* path */,
+                                             int fd) {
     LOG_VERBOSE("FwdLockEngine::onGetOriginalMimeType");
     String8 mimeString = String8("");
     int fileDesc = dup(fd);
@@ -354,32 +359,32 @@
     return DrmObjectType::UNKNOWN;
 }
 
-status_t FwdLockEngine::onRemoveRights(int uniqueId, const String8& path) {
+status_t FwdLockEngine::onRemoveRights(int /* uniqueId */, const String8& /* path */) {
     // No Rights to remove
     LOG_VERBOSE("FwdLockEngine::onRemoveRights");
     return DRM_NO_ERROR;
 }
 
-status_t FwdLockEngine::onRemoveAllRights(int uniqueId) {
+status_t FwdLockEngine::onRemoveAllRights(int /* uniqueId */) {
     // No rights to remove
     LOG_VERBOSE("FwdLockEngine::onRemoveAllRights");
     return DRM_NO_ERROR;
 }
 
 #ifdef USE_64BIT_DRM_API
-status_t FwdLockEngine::onSetPlaybackStatus(int uniqueId, DecryptHandle* decryptHandle,
-                                            int playbackStatus, int64_t position) {
+status_t FwdLockEngine::onSetPlaybackStatus(int /* uniqueId */, DecryptHandle* /* decryptHandle */,
+                                            int /* playbackStatus */, int64_t /* position */) {
 #else
-status_t FwdLockEngine::onSetPlaybackStatus(int uniqueId, DecryptHandle* decryptHandle,
-                                            int playbackStatus, int position) {
+status_t FwdLockEngine::onSetPlaybackStatus(int /* uniqueId */, DecryptHandle* /* decryptHandle */,
+                                            int /* playbackStatus */, int /* position */) {
 #endif
     // Not used
     LOG_VERBOSE("FwdLockEngine::onSetPlaybackStatus");
     return DRM_NO_ERROR;
 }
 
-status_t FwdLockEngine::onOpenConvertSession(int uniqueId,
-                                         int convertId) {
+status_t FwdLockEngine::onOpenConvertSession(
+            int /* uniqueId */, int convertId) {
     status_t result = DRM_ERROR_UNKNOWN;
     LOG_VERBOSE("FwdLockEngine::onOpenConvertSession");
     if (!convertSessionMap.isCreated(convertId)) {
@@ -396,7 +401,7 @@
     return result;
 }
 
-DrmConvertedStatus* FwdLockEngine::onConvertData(int uniqueId,
+DrmConvertedStatus* FwdLockEngine::onConvertData(int /* uniqueId */,
                                                  int convertId,
                                                  const DrmBuffer* inputData) {
     FwdLockConv_Status_t retStatus = FwdLockConv_Status_InvalidArgument;
@@ -432,7 +437,7 @@
     return new DrmConvertedStatus(getConvertedStatus(retStatus), convResult, offset);
 }
 
-DrmConvertedStatus* FwdLockEngine::onCloseConvertSession(int uniqueId,
+DrmConvertedStatus* FwdLockEngine::onCloseConvertSession(int /* uniqueId */,
                                                          int convertId) {
     FwdLockConv_Status_t retStatus = FwdLockConv_Status_InvalidArgument;
     DrmBuffer *convResult = new DrmBuffer(NULL, 0);
@@ -464,17 +469,17 @@
 }
 
 #ifdef USE_64BIT_DRM_API
-status_t FwdLockEngine::onOpenDecryptSession(int uniqueId,
+status_t FwdLockEngine::onOpenDecryptSession(int /* uniqueId */,
                                              DecryptHandle* decryptHandle,
                                              int fd,
                                              off64_t offset,
-                                             off64_t length) {
+                                             off64_t /* length */) {
 #else
-status_t FwdLockEngine::onOpenDecryptSession(int uniqueId,
+status_t FwdLockEngine::onOpenDecryptSession(int /* uniqueId */,
                                              DecryptHandle* decryptHandle,
                                              int fd,
                                              int offset,
-                                             int length) {
+                                             int /* length */) {
 #endif
     status_t result = DRM_ERROR_CANNOT_HANDLE;
     int fileDesc = -1;
@@ -552,7 +557,7 @@
     return result;
 }
 
-status_t FwdLockEngine::onCloseDecryptSession(int uniqueId,
+status_t FwdLockEngine::onCloseDecryptSession(int /* uniqueId */,
                                               DecryptHandle* decryptHandle) {
     status_t result = DRM_ERROR_UNKNOWN;
     LOG_VERBOSE("FwdLockEngine::onCloseDecryptSession");
@@ -584,37 +589,42 @@
     return result;
 }
 
-status_t FwdLockEngine::onInitializeDecryptUnit(int uniqueId,
-                                                DecryptHandle* decryptHandle,
-                                                int decryptUnitId,
-                                                const DrmBuffer* headerInfo) {
+status_t FwdLockEngine::onInitializeDecryptUnit(int /* uniqueId */,
+                                                DecryptHandle* /* decryptHandle */,
+                                                int /* decryptUnitId */,
+                                                const DrmBuffer* /* headerInfo */) {
     ALOGE("FwdLockEngine::onInitializeDecryptUnit is not supported for this DRM scheme");
     return DRM_ERROR_UNKNOWN;
 }
 
-status_t FwdLockEngine::onDecrypt(int uniqueId, DecryptHandle* decryptHandle, int decryptUnitId,
-            const DrmBuffer* encBuffer, DrmBuffer** decBuffer, DrmBuffer* IV) {
+status_t FwdLockEngine::onDecrypt(
+            int /* uniqueId */,
+            DecryptHandle* /* decryptHandle */,
+            int /* decryptUnitId */,
+            const DrmBuffer* /* encBuffer */,
+            DrmBuffer** /* decBuffer */,
+            DrmBuffer* /* IV */) {
     ALOGE("FwdLockEngine::onDecrypt is not supported for this DRM scheme");
     return DRM_ERROR_UNKNOWN;
 }
 
-status_t FwdLockEngine::onDecrypt(int uniqueId,
-                                  DecryptHandle* decryptHandle,
-                                  int decryptUnitId,
-                                  const DrmBuffer* encBuffer,
-                                  DrmBuffer** decBuffer) {
+status_t FwdLockEngine::onDecrypt(int /* uniqueId */,
+                                  DecryptHandle* /* decryptHandle */,
+                                  int /* decryptUnitId */,
+                                  const DrmBuffer* /* encBuffer */,
+                                  DrmBuffer** /* decBuffer */) {
     ALOGE("FwdLockEngine::onDecrypt is not supported for this DRM scheme");
     return DRM_ERROR_UNKNOWN;
 }
 
-status_t FwdLockEngine::onFinalizeDecryptUnit(int uniqueId,
-                                              DecryptHandle* decryptHandle,
-                                              int decryptUnitId) {
+status_t FwdLockEngine::onFinalizeDecryptUnit(int /* uniqueId */,
+                                              DecryptHandle* /* decryptHandle */,
+                                              int /* decryptUnitId */) {
     ALOGE("FwdLockEngine::onFinalizeDecryptUnit is not supported for this DRM scheme");
     return DRM_ERROR_UNKNOWN;
 }
 
-ssize_t FwdLockEngine::onRead(int uniqueId,
+ssize_t FwdLockEngine::onRead(int /* uniqueId */,
                               DecryptHandle* decryptHandle,
                               void* buffer,
                               int numBytes) {
@@ -640,10 +650,10 @@
 }
 
 #ifdef USE_64BIT_DRM_API
-off64_t FwdLockEngine::onLseek(int uniqueId, DecryptHandle* decryptHandle,
+off64_t FwdLockEngine::onLseek(int /* uniqueId */, DecryptHandle* decryptHandle,
                                off64_t offset, int whence) {
 #else
-off_t FwdLockEngine::onLseek(int uniqueId, DecryptHandle* decryptHandle,
+off_t FwdLockEngine::onLseek(int /* uniqueId */, DecryptHandle* decryptHandle,
                              off_t offset, int whence) {
 #endif
     off_t offval = -1;
diff --git a/drm/libmediadrm/Android.mk b/drm/libmediadrm/Android.mk
new file mode 100644
index 0000000..6a2ed31
--- /dev/null
+++ b/drm/libmediadrm/Android.mk
@@ -0,0 +1,36 @@
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libmediadrm
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+	Crypto.cpp \
+	Drm.cpp \
+	DrmSessionManager.cpp \
+	SharedLibrary.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+	libbinder \
+	libcrypto \
+	libcutils \
+	libdl \
+	liblog \
+	libmedia \
+	libstagefright \
+	libstagefright_foundation \
+	libutils
+
+LOCAL_C_INCLUDES := \
+    libcore/include
+
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CLANG := true
+
+LOCAL_MODULE:= libmediadrm
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libmediaplayerservice/Crypto.cpp b/drm/libmediadrm/Crypto.cpp
similarity index 96%
rename from media/libmediaplayerservice/Crypto.cpp
rename to drm/libmediadrm/Crypto.cpp
index 147d35f..79633cb 100644
--- a/media/libmediaplayerservice/Crypto.cpp
+++ b/drm/libmediadrm/Crypto.cpp
@@ -20,9 +20,8 @@
 #include <dirent.h>
 #include <dlfcn.h>
 
-#include "Crypto.h"
-
 #include <binder/IMemory.h>
+#include <media/Crypto.h>
 #include <media/hardware/CryptoAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AString.h>
@@ -235,10 +234,11 @@
 }
 
 ssize_t Crypto::decrypt(
-        bool secure,
+        DestinationType dstType,
         const uint8_t key[16],
         const uint8_t iv[16],
         CryptoPlugin::Mode mode,
+        const CryptoPlugin::Pattern &pattern,
         const sp<IMemory> &sharedBuffer, size_t offset,
         const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
         void *dstPtr,
@@ -256,7 +256,8 @@
     const void *srcPtr = static_cast<uint8_t *>(sharedBuffer->pointer()) + offset;
 
     return mPlugin->decrypt(
-            secure, key, iv, mode, srcPtr, subSamples, numSubSamples, dstPtr,
+            dstType != kDestinationTypeVmPointer,
+            key, iv, mode, pattern, srcPtr, subSamples, numSubSamples, dstPtr,
             errorDetailMsg);
 }
 
diff --git a/media/libmediaplayerservice/Drm.cpp b/drm/libmediadrm/Drm.cpp
similarity index 97%
rename from media/libmediaplayerservice/Drm.cpp
rename to drm/libmediadrm/Drm.cpp
index a7f6f8b..7c1f5c8 100644
--- a/media/libmediaplayerservice/Drm.cpp
+++ b/drm/libmediadrm/Drm.cpp
@@ -21,10 +21,9 @@
 #include <dirent.h>
 #include <dlfcn.h>
 
-#include "Drm.h"
-
-#include "DrmSessionClientInterface.h"
-#include "DrmSessionManager.h"
+#include <media/DrmSessionClientInterface.h>
+#include <media/DrmSessionManager.h>
+#include <media/Drm.h>
 #include <media/drm/DrmAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AString.h>
@@ -40,9 +39,6 @@
 }
 
 static bool checkPermission(const char* permissionString) {
-#ifndef HAVE_ANDROID_OS
-    return true;
-#endif
     if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
     bool ok = checkCallingPermission(String16(permissionString));
     if (!ok) ALOGE("Request requires %s", permissionString);
@@ -519,24 +515,6 @@
     return mPlugin->provideProvisionResponse(response, certificate, wrappedKey);
 }
 
-status_t Drm::unprovisionDevice() {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mInitCheck != OK) {
-        return mInitCheck;
-    }
-
-    if (mPlugin == NULL) {
-        return -EINVAL;
-    }
-
-    if (!checkPermission("android.permission.REMOVE_DRM_CERTIFICATES")) {
-        return -EPERM;
-    }
-
-    return mPlugin->unprovisionDevice();
-}
-
 status_t Drm::getSecureStops(List<Vector<uint8_t> > &secureStops) {
     Mutex::Autolock autoLock(mLock);
 
diff --git a/media/libmediaplayerservice/DrmSessionManager.cpp b/drm/libmediadrm/DrmSessionManager.cpp
similarity index 98%
rename from media/libmediaplayerservice/DrmSessionManager.cpp
rename to drm/libmediadrm/DrmSessionManager.cpp
index 641f881..a87fb9d 100644
--- a/media/libmediaplayerservice/DrmSessionManager.cpp
+++ b/drm/libmediadrm/DrmSessionManager.cpp
@@ -18,12 +18,11 @@
 #define LOG_TAG "DrmSessionManager"
 #include <utils/Log.h>
 
-#include "DrmSessionManager.h"
-
-#include "DrmSessionClientInterface.h"
 #include <binder/IPCThreadState.h>
 #include <binder/IProcessInfoService.h>
 #include <binder/IServiceManager.h>
+#include <media/DrmSessionManager.h>
+#include <media/DrmSessionClientInterface.h>
 #include <media/stagefright/ProcessInfo.h>
 #include <unistd.h>
 #include <utils/String8.h>
diff --git a/media/libmediaplayerservice/SharedLibrary.cpp b/drm/libmediadrm/SharedLibrary.cpp
similarity index 97%
rename from media/libmediaplayerservice/SharedLibrary.cpp
rename to drm/libmediadrm/SharedLibrary.cpp
index 34db761..74b3a71 100644
--- a/media/libmediaplayerservice/SharedLibrary.cpp
+++ b/drm/libmediadrm/SharedLibrary.cpp
@@ -16,12 +16,11 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "Drm"
-#include <utils/Log.h>
-#include <media/stagefright/foundation/ADebug.h>
 
 #include <dlfcn.h>
-
-#include "SharedLibrary.h"
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/SharedLibrary.h>
+#include <utils/Log.h>
 
 namespace android {
 
diff --git a/drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp
index 53cbf80..ee97976 100644
--- a/drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp
@@ -33,7 +33,7 @@
 // decrypted data.  In theory, the output size can be larger than the input
 // size, but in practice this will never happen for AES-CTR.
 ssize_t CryptoPlugin::decrypt(bool secure, const KeyId keyId, const Iv iv,
-                              Mode mode, const void* srcPtr,
+                              Mode mode, const Pattern &/* pattern */, const void* srcPtr,
                               const SubSample* subSamples, size_t numSubSamples,
                               void* dstPtr, AString* errorDetailMsg) {
     if (secure) {
diff --git a/drm/mediadrm/plugins/clearkey/CryptoPlugin.h b/drm/mediadrm/plugins/clearkey/CryptoPlugin.h
index fd38f28..de84c36 100644
--- a/drm/mediadrm/plugins/clearkey/CryptoPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/CryptoPlugin.h
@@ -44,7 +44,7 @@
 
     virtual ssize_t decrypt(
             bool secure, const KeyId keyId, const Iv iv,
-            Mode mode, const void* srcPtr,
+            Mode mode, const Pattern &pattern, const void* srcPtr,
             const SubSample* subSamples, size_t numSubSamples,
             void* dstPtr, android::AString* errorDetailMsg);
 
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index ba4aefe..9095045 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -105,10 +105,6 @@
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
 
-    virtual status_t unprovisionDevice() {
-        return android::ERROR_DRM_CANNOT_HANDLE;
-    }
-
     virtual status_t getSecureStops(List<Vector<uint8_t> >& secureStops) {
         UNUSED(secureStops);
         return android::ERROR_DRM_CANNOT_HANDLE;
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
index 851ad2c..1e80f8e 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
@@ -312,12 +312,6 @@
         return OK;
     }
 
-    status_t MockDrmPlugin::unprovisionDevice()
-    {
-        ALOGD("MockDrmPlugin::unprovisionDevice()");
-        return OK;
-    }
-
     status_t MockDrmPlugin::getSecureStop(Vector<uint8_t> const & /* ssid */,
                                           Vector<uint8_t> & secureStop)
     {
@@ -798,15 +792,17 @@
 
     ssize_t
     MockCryptoPlugin::decrypt(bool secure, const uint8_t key[16], const uint8_t iv[16],
-                              Mode mode, const void *srcPtr, const SubSample *subSamples,
-                              size_t numSubSamples, void *dstPtr, AString * /* errorDetailMsg */)
+            Mode mode, const Pattern &pattern, const void *srcPtr,
+            const SubSample *subSamples, size_t numSubSamples,
+            void *dstPtr, AString * /* errorDetailMsg */)
     {
-        ALOGD("MockCryptoPlugin::decrypt(secure=%d, key=%s, iv=%s, mode=%d, src=%p, "
+        ALOGD("MockCryptoPlugin::decrypt(secure=%d, key=%s, iv=%s, mode=%d, "
+              "pattern:{encryptBlocks=%d, skipBlocks=%d} src=%p, "
               "subSamples=%s, dst=%p)",
               (int)secure,
               arrayToString(key, sizeof(key)).string(),
               arrayToString(iv, sizeof(iv)).string(),
-              (int)mode, srcPtr,
+              (int)mode, pattern.mEncryptBlocks, pattern.mSkipBlocks, srcPtr,
               subSamplesToString(subSamples, numSubSamples).string(),
               dstPtr);
         return OK;
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
index d0f2ddb..40d4e84 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
@@ -86,8 +86,6 @@
                                           Vector<uint8_t> &certificate,
                                           Vector<uint8_t> &wrappedKey);
 
-        status_t unprovisionDevice();
-
         status_t getSecureStops(List<Vector<uint8_t> > &secureStops);
         status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
         status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
@@ -161,7 +159,7 @@
 
         ssize_t decrypt(bool secure,
             const uint8_t key[16], const uint8_t iv[16],
-            Mode mode, const void *srcPtr,
+            Mode mode, const Pattern &pattern, const void *srcPtr,
             const SubSample *subSamples, size_t numSubSamples,
             void *dstPtr, AString *errorDetailMsg);
     private:
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index 2b60842..be793a2 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -18,13 +18,15 @@
 #define ANDROID_HARDWARE_CAMERA_H
 
 #include <utils/Timers.h>
+
+#include <android/hardware/ICameraService.h>
+
 #include <gui/IGraphicBufferProducer.h>
 #include <system/camera.h>
-#include <camera/ICameraClient.h>
 #include <camera/ICameraRecordingProxy.h>
 #include <camera/ICameraRecordingProxyListener.h>
-#include <camera/ICameraService.h>
-#include <camera/ICamera.h>
+#include <camera/android/hardware/ICamera.h>
+#include <camera/android/hardware/ICameraClient.h>
 #include <camera/CameraBase.h>
 
 namespace android {
@@ -41,6 +43,7 @@
     virtual void postData(int32_t msgType, const sp<IMemory>& dataPtr,
                           camera_frame_metadata_t *metadata) = 0;
     virtual void postDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) = 0;
+    virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle) = 0;
 };
 
 class Camera;
@@ -48,31 +51,35 @@
 template <>
 struct CameraTraits<Camera>
 {
-    typedef CameraListener        TCamListener;
-    typedef ICamera               TCamUser;
-    typedef ICameraClient         TCamCallbacks;
-    typedef status_t (ICameraService::*TCamConnectService)(const sp<ICameraClient>&,
-                                                           int, const String16&, int,
-                                                           /*out*/
-                                                           sp<ICamera>&);
+    typedef CameraListener                     TCamListener;
+    typedef ::android::hardware::ICamera       TCamUser;
+    typedef ::android::hardware::ICameraClient TCamCallbacks;
+    typedef ::android::binder::Status(::android::hardware::ICameraService::*TCamConnectService)
+        (const sp<::android::hardware::ICameraClient>&,
+        int, const String16&, int, int,
+        /*out*/
+        sp<::android::hardware::ICamera>*);
     static TCamConnectService     fnConnectService;
 };
 
 
 class Camera :
     public CameraBase<Camera>,
-    public BnCameraClient
+    public ::android::hardware::BnCameraClient
 {
 public:
     enum {
-        USE_CALLING_UID = ICameraService::USE_CALLING_UID
+        USE_CALLING_UID = ::android::hardware::ICameraService::USE_CALLING_UID
+    };
+    enum {
+        USE_CALLING_PID = ::android::hardware::ICameraService::USE_CALLING_PID
     };
 
             // construct a camera client from an existing remote
-    static  sp<Camera>  create(const sp<ICamera>& camera);
+    static  sp<Camera>  create(const sp<::android::hardware::ICamera>& camera);
     static  sp<Camera>  connect(int cameraId,
                                 const String16& clientPackageName,
-                                int clientUid);
+                                int clientUid, int clientPid);
 
     static  status_t  connectLegacy(int cameraId, int halVersion,
                                      const String16& clientPackageName,
@@ -108,6 +115,9 @@
             // release a recording frame
             void        releaseRecordingFrame(const sp<IMemory>& mem);
 
+            // release a recording frame handle
+            void        releaseRecordingFrameHandle(native_handle_t *handle);
+
             // autoFocus - status returned from callback
             status_t    autoFocus();
 
@@ -126,8 +136,15 @@
             // send command to camera driver
             status_t    sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
 
-            // tell camera hal to store meta data or real YUV in video buffers.
-            status_t    storeMetaDataInBuffers(bool enabled);
+            // Tell camera how to pass video buffers. videoBufferMode is one of VIDEO_BUFFER_MODE_*.
+            // Returns OK if the specified video buffer mode is supported. If videoBufferMode is
+            // VIDEO_BUFFER_MODE_BUFFER_QUEUE, setVideoTarget() must be called before starting
+            // video recording.
+            status_t    setVideoBufferMode(int32_t videoBufferMode);
+
+            // Set the video buffer producer for camera to use in VIDEO_BUFFER_MODE_BUFFER_QUEUE
+            // mode.
+            status_t    setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
 
             void        setListener(const sp<CameraListener>& listener);
             void        setRecordingProxyListener(const sp<ICameraRecordingProxyListener>& listener);
@@ -148,6 +165,7 @@
     virtual void        dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
                                      camera_frame_metadata_t *metadata);
     virtual void        dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
+    virtual void        recordingFrameHandleCallbackTimestamp(nsecs_t timestamp, native_handle_t* handle);
 
     class RecordingProxy : public BnCameraRecordingProxy
     {
@@ -158,6 +176,7 @@
         virtual status_t startRecording(const sp<ICameraRecordingProxyListener>& listener);
         virtual void stopRecording();
         virtual void releaseRecordingFrame(const sp<IMemory>& mem);
+        virtual void releaseRecordingFrameHandle(native_handle_t* handle);
 
     private:
         sp<Camera>         mCamera;
diff --git a/include/camera/CameraBase.h b/include/camera/CameraBase.h
index 1b93157..0692a27 100644
--- a/include/camera/CameraBase.h
+++ b/include/camera/CameraBase.h
@@ -18,13 +18,18 @@
 #define ANDROID_HARDWARE_CAMERA_BASE_H
 
 #include <utils/Mutex.h>
-#include <camera/ICameraService.h>
 
 struct camera_frame_metadata;
 
 namespace android {
 
-struct CameraInfo {
+namespace hardware {
+
+
+class ICameraService;
+class ICameraServiceListener;
+
+struct CameraInfo : public android::Parcelable {
     /**
      * The direction that the camera faces to. It should be CAMERA_FACING_BACK
      * or CAMERA_FACING_FRONT.
@@ -44,8 +49,17 @@
      * right of the screen, the value should be 270.
      */
     int orientation;
+
+    virtual status_t writeToParcel(Parcel* parcel) const;
+    virtual status_t readFromParcel(const Parcel* parcel);
+
 };
 
+} // namespace hardware
+
+using hardware::CameraInfo;
+
+
 template <typename TCam>
 struct CameraTraits {
 };
@@ -61,7 +75,7 @@
 
     static sp<TCam>      connect(int cameraId,
                                  const String16& clientPackageName,
-                                 int clientUid);
+                                 int clientUid, int clientPid);
     virtual void         disconnect();
 
     void                 setListener(const sp<TCamListener>& listener);
@@ -70,13 +84,13 @@
 
     static status_t      getCameraInfo(int cameraId,
                                        /*out*/
-                                       struct CameraInfo* cameraInfo);
+                                       struct hardware::CameraInfo* cameraInfo);
 
     static status_t      addServiceListener(
-                                    const sp<ICameraServiceListener>& listener);
+        const sp<::android::hardware::ICameraServiceListener>& listener);
 
     static status_t      removeServiceListener(
-                                    const sp<ICameraServiceListener>& listener);
+        const sp<::android::hardware::ICameraServiceListener>& listener);
 
     sp<TCamUser>         remote();
 
@@ -101,7 +115,7 @@
     virtual void                     binderDied(const wp<IBinder>& who);
 
     // helper function to obtain camera service handle
-    static const sp<ICameraService>& getCameraService();
+    static const sp<::android::hardware::ICameraService>& getCameraService();
 
     sp<TCamUser>                     mCamera;
     status_t                         mStatus;
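
[Editor's note] Dropping the ICameraService include in favor of forward declarations works because this header only names the type through sp<> references; only translation units that call through the interface need the full (now AIDL-generated) definition, which cuts rebuild fan-out. A minimal compilable illustration:

    #include <cstdio>

    namespace android { namespace hardware {
    class ICameraService;  // forward declaration only: size/members not needed
    } }

    // A function that mentions the type only by pointer compiles without
    // the full class definition.
    static void logService(const android::hardware::ICameraService* s) {
        printf("service=%p\n", static_cast<const void*>(s));
    }

    int main() {
        logService(nullptr);
        return 0;
    }
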
diff --git a/include/camera/CameraMetadata.h b/include/camera/CameraMetadata.h
index 953d711..28f47a1 100644
--- a/include/camera/CameraMetadata.h
+++ b/include/camera/CameraMetadata.h
@@ -20,14 +20,14 @@
 #include "system/camera_metadata.h"
 #include <utils/String8.h>
 #include <utils/Vector.h>
+#include <binder/Parcelable.h>
 
 namespace android {
-class Parcel;
 
 /**
  * A convenience wrapper around the C-based camera_metadata_t library.
  */
-class CameraMetadata {
+class CameraMetadata: public Parcelable {
   public:
     /** Creates an empty object; best used when expecting to acquire contents
      * from elsewhere */
@@ -64,7 +64,7 @@
      * from getAndLock must be provided to guarantee that the right object is
      * being unlocked.
      */
-    status_t unlock(const camera_metadata_t *buffer);
+    status_t unlock(const camera_metadata_t *buffer) const;
 
     /**
      * Release a raw metadata buffer to the caller. After this call,
@@ -186,8 +186,8 @@
      */
 
     // Metadata object is unchanged when reading from parcel fails.
-    status_t readFromParcel(Parcel *parcel);
-    status_t writeToParcel(Parcel *parcel) const;
+    virtual status_t readFromParcel(const Parcel *parcel) override;
+    virtual status_t writeToParcel(Parcel *parcel) const override;
 
     /**
       * Caller becomes the owner of the new metadata
@@ -227,6 +227,15 @@
 
 };
 
-}; // namespace android
+namespace hardware {
+namespace camera2 {
+namespace impl {
+using ::android::CameraMetadata;
+typedef CameraMetadata CameraMetadataNative;
+}
+}
+}
+
+} // namespace android
 
 #endif
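
[Editor's note] CameraMetadata now implements the Parcelable contract, with the const-correct pair writeToParcel(Parcel*) const and readFromParcel(const Parcel*). A hedged sketch of that round-trip contract; status_t/OK and Parcel here are toys, not libbinder:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    typedef int32_t status_t;
    static const status_t OK = 0;

    class Parcel {
    public:
        void writeInt32(int32_t v) { mData.push_back(v); }
        status_t readInt32(size_t i, int32_t* v) const {
            if (i >= mData.size()) return -1;
            *v = mData[i];
            return OK;
        }
    private:
        std::vector<int32_t> mData;
    };

    class Parcelable {
    public:
        virtual ~Parcelable() {}
        // Writing never mutates the object; reading never mutates the parcel.
        virtual status_t writeToParcel(Parcel* parcel) const = 0;
        virtual status_t readFromParcel(const Parcel* parcel) = 0;
    };

    class Metadata : public Parcelable {
    public:
        int32_t orientation = 0;
        status_t writeToParcel(Parcel* parcel) const override {
            parcel->writeInt32(orientation);
            return OK;
        }
        status_t readFromParcel(const Parcel* parcel) override {
            return parcel->readInt32(0, &orientation);
        }
    };

    int main() {
        Metadata in, out;
        in.orientation = 270;
        Parcel p;
        if (in.writeToParcel(&p) != OK) return 1;
        if (out.readFromParcel(&p) != OK) return 1;
        printf("orientation=%d\n", out.orientation);
        return 0;
    }
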
diff --git a/include/camera/CameraUtils.h b/include/camera/CameraUtils.h
index c06f05d..f596f80 100644
--- a/include/camera/CameraUtils.h
+++ b/include/camera/CameraUtils.h
@@ -17,8 +17,10 @@
 #ifndef ANDROID_CAMERA_CLIENT_CAMERAUTILS_H
 #define ANDROID_CAMERA_CLIENT_CAMERAUTILS_H
 
+#include <binder/IMemory.h>
 #include <camera/CameraMetadata.h>
 #include <utils/Errors.h>
+#include <utils/RefBase.h>
 
 #include <stdint.h>
 
@@ -39,6 +41,12 @@
          */
         static status_t getRotationTransform(const CameraMetadata& staticInfo,
                 /*out*/int32_t* transform);
+
+        /**
+         * Check if the image data is VideoNativeHandleMetadata, that contains a native handle.
+         */
+        static bool isNativeHandleMetadata(const sp<IMemory>& imageData);
+
     private:
         CameraUtils();
 };
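
[Editor's note] A check like isNativeHandleMetadata() typically inspects a type tag at the front of the IMemory buffer. The sketch below is modeled on Android's metadata-buffer convention; the constant's value and the layout are assumptions, not the actual libcamera_client implementation:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static const uint32_t kMetadataBufferTypeNativeHandleSource = 3;  // assumed value

    static bool isNativeHandleMetadata(const uint8_t* data, size_t size) {
        uint32_t type = 0;
        if (data == nullptr || size < sizeof(type)) {
            return false;  // too small to hold even the type tag
        }
        memcpy(&type, data, sizeof(type));
        return type == kMetadataBufferTypeNativeHandleSource;
    }

    int main() {
        uint8_t buf[8] = {};
        uint32_t tag = kMetadataBufferTypeNativeHandleSource;
        memcpy(buf, &tag, sizeof(tag));
        printf("%d\n", isNativeHandleMetadata(buf, sizeof(buf)));  // 1
        return 0;
    }
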
diff --git a/include/camera/CaptureResult.h b/include/camera/CaptureResult.h
index 0be7d6f..45e4518 100644
--- a/include/camera/CaptureResult.h
+++ b/include/camera/CaptureResult.h
@@ -18,15 +18,21 @@
 #define ANDROID_HARDWARE_CAPTURERESULT_H
 
 #include <utils/RefBase.h>
+#include <binder/Parcelable.h>
 #include <camera/CameraMetadata.h>
 
+
 namespace android {
 
+namespace hardware {
+namespace camera2 {
+namespace impl {
+
 /**
  * CaptureResultExtras is a structure to encapsulate various indices for a capture result.
  * These indices are framework-internal and not sent to the HAL.
  */
-struct CaptureResultExtras {
+struct CaptureResultExtras : public android::Parcelable {
     /**
      * An integer to index the request sequence that this result belongs to.
      */
@@ -58,6 +64,12 @@
     int32_t partialResultCount;
 
     /**
+     * For buffer drop errors, the stream ID for the stream that lost a buffer.
+     * Otherwise -1.
+     */
+    int32_t errorStreamId;
+
+    /**
      * Constructor initializes object as invalid by setting requestId to be -1.
      */
     CaptureResultExtras()
@@ -66,7 +78,8 @@
           afTriggerId(0),
           precaptureTriggerId(0),
           frameNumber(0),
-          partialResultCount(0) {
+          partialResultCount(0),
+          errorStreamId(-1) {
     }
 
     /**
@@ -75,9 +88,14 @@
      */
     bool isValid();
 
-    status_t                readFromParcel(Parcel* parcel);
-    status_t                writeToParcel(Parcel* parcel) const;
+    virtual status_t                readFromParcel(const Parcel* parcel) override;
+    virtual status_t                writeToParcel(Parcel* parcel) const override;
 };
+} // namespace impl
+} // namespace camera2
+} // namespace hardware
+
+using hardware::camera2::impl::CaptureResultExtras;
 
 struct CaptureResult : public virtual LightRefBase<CaptureResult> {
     CameraMetadata          mMetadata;
diff --git a/include/camera/ICameraRecordingProxy.h b/include/camera/ICameraRecordingProxy.h
index 4edf9cd..cb6824a 100644
--- a/include/camera/ICameraRecordingProxy.h
+++ b/include/camera/ICameraRecordingProxy.h
@@ -18,6 +18,7 @@
 #define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_H
 
 #include <binder/IInterface.h>
+#include <cutils/native_handle.h>
 #include <utils/RefBase.h>
 
 namespace android {
@@ -83,12 +84,7 @@
     virtual status_t        startRecording(const sp<ICameraRecordingProxyListener>& listener) = 0;
     virtual void            stopRecording() = 0;
     virtual void            releaseRecordingFrame(const sp<IMemory>& mem) = 0;
-
-    // b/28466701
-    static  size_t          getCommonBaseAddress();
-  private:
-
-    static  uint8_t         baseObject;
+    virtual void            releaseRecordingFrameHandle(native_handle_t *handle) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/camera/ICameraRecordingProxyListener.h b/include/camera/ICameraRecordingProxyListener.h
index b6c0624..1fee5b9 100644
--- a/include/camera/ICameraRecordingProxyListener.h
+++ b/include/camera/ICameraRecordingProxyListener.h
@@ -18,6 +18,7 @@
 #define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
 
 #include <binder/IInterface.h>
+#include <cutils/native_handle.h>
 #include <stdint.h>
 #include <utils/RefBase.h>
 #include <utils/Timers.h>
@@ -34,6 +35,9 @@
 
     virtual void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType,
                                        const sp<IMemory>& data) = 0;
+
+    virtual void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
+                                                       native_handle_t* handle) = 0;
 };
 
 // ----------------------------------------------------------------------------
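
[Editor's note] The new recordingFrameHandleCallbackTimestamp()/releaseRecordingFrameHandle() pair implies an ownership contract: every native_handle_t* delivered to the listener must be handed back to the proxy, or the producer leaks it. A toy sketch of that round trip; the types stand in for the real binder interfaces and cutils handle helpers:

    #include <cstdio>

    struct native_handle_t { int fd; };  // stand-in for the cutils struct

    struct Proxy {
        void releaseRecordingFrameHandle(native_handle_t* handle) {
            printf("released fd=%d\n", handle->fd);
            // Real code would close the fds and call native_handle_delete().
            delete handle;
        }
    };

    struct Listener {
        Proxy* proxy;
        void recordingFrameHandleCallbackTimestamp(long long /* timestampUs */,
                                                   native_handle_t* handle) {
            printf("got frame, fd=%d\n", handle->fd);
            proxy->releaseRecordingFrameHandle(handle);  // hand it back promptly
        }
    };

    int main() {
        Proxy p;
        Listener l{&p};
        l.recordingFrameHandleCallbackTimestamp(0, new native_handle_t{42});
        return 0;
    }
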
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
deleted file mode 100644
index 1b68b5f..0000000
--- a/include/camera/ICameraService.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_ICAMERASERVICE_H
-#define ANDROID_HARDWARE_ICAMERASERVICE_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-
-namespace android {
-
-class ICamera;
-class ICameraClient;
-class ICameraServiceListener;
-class ICameraDeviceUser;
-class ICameraDeviceCallbacks;
-class CameraMetadata;
-class VendorTagDescriptor;
-class String16;
-
-class ICameraService : public IInterface
-{
-public:
-    /**
-     * Keep up-to-date with ICameraService.aidl in frameworks/base
-     */
-    enum {
-        GET_NUMBER_OF_CAMERAS = IBinder::FIRST_CALL_TRANSACTION,
-        GET_CAMERA_INFO,
-        CONNECT,
-        CONNECT_DEVICE,
-        ADD_LISTENER,
-        REMOVE_LISTENER,
-        GET_CAMERA_CHARACTERISTICS,
-        GET_CAMERA_VENDOR_TAG_DESCRIPTOR,
-        GET_LEGACY_PARAMETERS,
-        SUPPORTS_CAMERA_API,
-        CONNECT_LEGACY,
-        SET_TORCH_MODE,
-        NOTIFY_SYSTEM_EVENT,
-    };
-
-    enum {
-        USE_CALLING_UID = -1
-    };
-
-    enum {
-        API_VERSION_1 = 1,
-        API_VERSION_2 = 2,
-    };
-
-    enum {
-        CAMERA_TYPE_BACKWARD_COMPATIBLE = 0,
-        CAMERA_TYPE_ALL = 1,
-    };
-
-    enum {
-        CAMERA_HAL_API_VERSION_UNSPECIFIED = -1
-    };
-
-    /**
-     * Keep up-to-date with declarations in
-     * frameworks/base/services/core/java/com/android/server/camera/CameraService.java
-     *
-     * These event codes are intended to be used with the notifySystemEvent call.
-     */
-    enum {
-        NO_EVENT = 0,
-        USER_SWITCHED,
-    };
-
-public:
-    DECLARE_META_INTERFACE(CameraService);
-
-    // Get the number of cameras that support basic color camera operation
-    // (type CAMERA_TYPE_BACKWARD_COMPATIBLE)
-    virtual int32_t  getNumberOfCameras() = 0;
-    // Get the number of cameras of the specified type, one of CAMERA_TYPE_*
-    // enums
-    virtual int32_t  getNumberOfCameras(int cameraType) = 0;
-    virtual status_t getCameraInfo(int cameraId,
-            /*out*/
-            struct CameraInfo* cameraInfo) = 0;
-
-    virtual status_t getCameraCharacteristics(int cameraId,
-            /*out*/
-            CameraMetadata* cameraInfo) = 0;
-
-    virtual status_t getCameraVendorTagDescriptor(
-            /*out*/
-            sp<VendorTagDescriptor>& desc) = 0;
-
-    // Returns 'OK' if operation succeeded
-    // - Errors: ALREADY_EXISTS if the listener was already added
-    virtual status_t addListener(const sp<ICameraServiceListener>& listener)
-                                                                            = 0;
-    // Returns 'OK' if operation succeeded
-    // - Errors: BAD_VALUE if specified listener was not in the listener list
-    virtual status_t removeListener(const sp<ICameraServiceListener>& listener)
-                                                                            = 0;
-    /**
-     * clientPackageName and clientUid are used for permissions checking.  if
-     * clientUid == USE_CALLING_UID, then the calling UID is used instead. Only
-     * trusted callers can set a clientUid other than USE_CALLING_UID.
-     */
-    virtual status_t connect(const sp<ICameraClient>& cameraClient,
-            int cameraId,
-            const String16& clientPackageName,
-            int clientUid,
-            /*out*/
-            sp<ICamera>& device) = 0;
-
-    virtual status_t connectDevice(
-            const sp<ICameraDeviceCallbacks>& cameraCb,
-            int cameraId,
-            const String16& clientPackageName,
-            int clientUid,
-            /*out*/
-            sp<ICameraDeviceUser>& device) = 0;
-
-    virtual status_t getLegacyParameters(
-            int cameraId,
-            /*out*/
-            String16* parameters) = 0;
-
-    /**
-     * Returns OK if device supports camera2 api,
-     * returns -EOPNOTSUPP if it doesn't.
-     */
-    virtual status_t supportsCameraApi(
-            int cameraId, int apiVersion) = 0;
-
-    /**
-     * Connect the device as a legacy device for a given HAL version.
-     * For halVersion, use CAMERA_API_DEVICE_VERSION_* for a particular
-     * version, or CAMERA_HAL_API_VERSION_UNSPECIFIED for a service-selected version.
-     */
-    virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient,
-            int cameraId, int halVersion,
-            const String16& clientPackageName,
-            int clientUid,
-            /*out*/
-            sp<ICamera>& device) = 0;
-
-    /**
-     * Turn on or off a camera's torch mode. Torch mode will be turned off by
-     * camera service if the latest client binder that turns it on dies.
-     *
-     * return values:
-     * 0:       on a successful operation.
-     * -ENOSYS: the camera device doesn't support this operation. It is returned
-     *          if and only if android.flash.info.available is false.
-     * -EBUSY:  the camera device is opened.
-     * -EINVAL: camera_id is invalid or clientBinder is NULL when enabling a
-     *          torch mode.
-     */
-    virtual status_t setTorchMode(const String16& cameraId, bool enabled,
-            const sp<IBinder>& clientBinder) = 0;
-
-    /**
-     * Notify the camera service of a system event.  Should only be called from system_server.
-     */
-    virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t length) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnCameraService: public BnInterface<ICameraService>
-{
-public:
-    virtual status_t    onTransact( uint32_t code,
-                                    const Parcel& data,
-                                    Parcel* reply,
-                                    uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/ICameraServiceListener.h b/include/camera/ICameraServiceListener.h
deleted file mode 100644
index 709ff31..0000000
--- a/include/camera/ICameraServiceListener.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_ICAMERASERVICE_LISTENER_H
-#define ANDROID_HARDWARE_ICAMERASERVICE_LISTENER_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <hardware/camera_common.h>
-
-namespace android {
-
-class ICameraServiceListener : public IInterface
-{
-    /**
-     * Keep up-to-date with ICameraServiceListener.aidl in frameworks/base
-     */
-public:
-
-    /**
-     * Initial status will be transmitted with onStatusChange immediately
-     * after this listener is added to the service listener list.
-     *
-     * Allowed transitions:
-     *
-     *     (Any)               -> NOT_PRESENT
-     *     NOT_PRESENT         -> PRESENT
-     *     NOT_PRESENT         -> ENUMERATING
-     *     ENUMERATING         -> PRESENT
-     *     PRESENT             -> NOT_AVAILABLE
-     *     NOT_AVAILABLE       -> PRESENT
-     *
-     * A state will never immediately transition back to itself.
-     */
-    enum Status {
-        // Device physically unplugged
-        STATUS_NOT_PRESENT      = CAMERA_DEVICE_STATUS_NOT_PRESENT,
-        // Device physically has been plugged in
-        //  and the camera can be used exclusively
-        STATUS_PRESENT          = CAMERA_DEVICE_STATUS_PRESENT,
-        // Device physically has been plugged in
-        //   but it will not be connect-able until enumeration is complete
-        STATUS_ENUMERATING      = CAMERA_DEVICE_STATUS_ENUMERATING,
-
-        // Camera can be used exclusively
-        STATUS_AVAILABLE        = STATUS_PRESENT, // deprecated, will be removed
-
-        // Camera is in use by another app and cannot be used exclusively
-        STATUS_NOT_AVAILABLE    = 0x80000000,
-
-        // Use to initialize variables only
-        STATUS_UNKNOWN          = 0xFFFFFFFF,
-    };
-
-    /**
-     * The torch mode status of a camera.
-     *
-     * Initial status will be transmitted with onTorchStatusChanged immediately
-     * after this listener is added to the service listener list.
-     *
-     * The enums should be set to values matching
-     * include/hardware/camera_common.h
-     */
-    enum TorchStatus {
-        // The camera's torch mode has become not available to use via
-        // setTorchMode().
-        TORCH_STATUS_NOT_AVAILABLE  = TORCH_MODE_STATUS_NOT_AVAILABLE,
-        // The camera's torch mode is off and available to be turned on via
-        // setTorchMode().
-        TORCH_STATUS_AVAILABLE_OFF  = TORCH_MODE_STATUS_AVAILABLE_OFF,
-        // The camera's torch mode is on and available to be turned off via
-        // setTorchMode().
-        TORCH_STATUS_AVAILABLE_ON   = TORCH_MODE_STATUS_AVAILABLE_ON,
-
-        // Use to initialize variables only
-        TORCH_STATUS_UNKNOWN        = 0xFFFFFFFF,
-    };
-
-    DECLARE_META_INTERFACE(CameraServiceListener);
-
-    virtual void onStatusChanged(Status status, int32_t cameraId) = 0;
-
-    virtual void onTorchStatusChanged(TorchStatus status, const String16& cameraId) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnCameraServiceListener : public BnInterface<ICameraServiceListener>
-{
-public:
-    virtual status_t    onTransact( uint32_t code,
-                                    const Parcel& data,
-                                    Parcel* reply,
-                                    uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/VendorTagDescriptor.h b/include/camera/VendorTagDescriptor.h
index 1758acf..4c1cab6 100644
--- a/include/camera/VendorTagDescriptor.h
+++ b/include/camera/VendorTagDescriptor.h
@@ -16,6 +16,7 @@
 
 #ifndef VENDOR_TAG_DESCRIPTOR_H
 
+#include <binder/Parcelable.h>
 #include <utils/Vector.h>
 #include <utils/KeyedVector.h>
 #include <utils/String8.h>
@@ -26,18 +27,27 @@
 
 namespace android {
 
-class Parcel;
+class VendorTagDescriptor;
+
+namespace hardware {
+namespace camera2 {
+namespace params {
 
 /**
  * VendorTagDescriptor objects are parcelable containers for the vendor tag
  * definitions provided, and are typically used to pass the vendor tag
  * information enumerated by the HAL to clients of the camera service.
  */
-class VendorTagDescriptor
-        : public LightRefBase<VendorTagDescriptor> {
+class VendorTagDescriptor : public Parcelable {
     public:
         virtual ~VendorTagDescriptor();
 
+        VendorTagDescriptor();
+        VendorTagDescriptor(const VendorTagDescriptor& src);
+        VendorTagDescriptor& operator=(const VendorTagDescriptor& rhs);
+
+        void copyFrom(const VendorTagDescriptor& src);
+
         /**
          * The following 'get*' methods implement the corresponding
          * functions defined in
@@ -64,9 +74,9 @@
          *
          * Returns OK on success, or a negative error code.
          */
-        status_t writeToParcel(
+        virtual status_t writeToParcel(
                 /*out*/
-                Parcel* parcel) const;
+                Parcel* parcel) const override;
 
         /**
          * Convenience method to get a vector containing all vendor tag
@@ -86,48 +96,14 @@
          */
         void dump(int fd, int verbosity, int indentation) const;
 
-        // Static methods:
-
         /**
-         * Create a VendorTagDescriptor object from the given parcel.
+         * Read values into this VendorTagDescriptor object from the given parcel.
          *
          * Returns OK on success, or a negative error code.
          */
-        static status_t createFromParcel(const Parcel* parcel,
-                /*out*/
-                sp<VendorTagDescriptor>& descriptor);
+        virtual status_t readFromParcel(const Parcel* parcel) override;
 
-        /**
-         * Create a VendorTagDescriptor object from the given vendor_tag_ops_t
-         * struct.
-         *
-         * Returns OK on success, or a negative error code.
-         */
-        static status_t createDescriptorFromOps(const vendor_tag_ops_t* vOps,
-                /*out*/
-                sp<VendorTagDescriptor>& descriptor);
-
-        /**
-         * Sets the global vendor tag descriptor to use for this process.
-         * Camera metadata operations that access vendor tags will use the
-         * vendor tag definitions set this way.
-         *
-         * Returns OK on success, or a negative error code.
-         */
-        static status_t setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc);
-
-        /**
-         * Clears the global vendor tag descriptor used by this process.
-         */
-        static void clearGlobalVendorTagDescriptor();
-
-        /**
-         * Returns the global vendor tag descriptor used by this process.
-         * This will contain NULL if no vendor tags are defined.
-         */
-        static sp<VendorTagDescriptor> getGlobalVendorTagDescriptor();
     protected:
-        VendorTagDescriptor();
         KeyedVector<String8, KeyedVector<String8, uint32_t>*> mReverseMapping;
         KeyedVector<uint32_t, String8> mTagToNameMap;
         KeyedVector<uint32_t, uint32_t> mTagToSectionMap; // Value is offset in mSections
@@ -135,11 +111,61 @@
         SortedVector<String8> mSections;
         // must be int32_t to be compatible with Parcel::writeInt32
         int32_t mTagCount;
-    private:
+
         vendor_tag_ops mVendorOps;
 };
+} /* namespace params */
+} /* namespace camera2 */
+} /* namespace hardware */
+
+/**
+ * This version of VendorTagDescriptor must be stored in Android sp<>, and adds support for using it
+ * as a global tag descriptor.
+ *
+ * It's a child class of the basic hardware::camera2::params::VendorTagDescriptor since basic
+ * Parcelable objects cannot require being kept in an sp<> and still work with auto-generated AIDL
+ * interface implementations.
+ */
+class VendorTagDescriptor :
+            public ::android::hardware::camera2::params::VendorTagDescriptor,
+            public LightRefBase<VendorTagDescriptor> {
+
+  public:
+
+    /**
+     * Create a VendorTagDescriptor object from the given vendor_tag_ops_t
+     * struct.
+     *
+     * Returns OK on success, or a negative error code.
+     */
+    static status_t createDescriptorFromOps(const vendor_tag_ops_t* vOps,
+            /*out*/
+            sp<VendorTagDescriptor>& descriptor);
+
+    /**
+     * Sets the global vendor tag descriptor to use for this process.
+     * Camera metadata operations that access vendor tags will use the
+     * vendor tag definitions set this way.
+     *
+     * Returns OK on success, or a negative error code.
+     */
+    static status_t setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc);
+
+    /**
+     * Returns the global vendor tag descriptor used by this process.
+     * This will contain NULL if no vendor tags are defined.
+     */
+    static sp<VendorTagDescriptor> getGlobalVendorTagDescriptor();
+
+    /**
+     * Clears the global vendor tag descriptor used by this process.
+     */
+    static void clearGlobalVendorTagDescriptor();
+
+};
 
 } /* namespace android */
 
+
 #define VENDOR_TAG_DESCRIPTOR_H
 #endif /* VENDOR_TAG_DESCRIPTOR_H */
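As a rough illustration of the refactored API (not part of this change), a minimal sketch of publishing a HAL's vendor tags process-wide; `vOps` is assumed to come from the loaded camera module:

    #include <camera/VendorTagDescriptor.h>

    using namespace android;

    // Sketch: build the sp<>-managed descriptor and install it globally so that
    // CameraMetadata operations in this process can resolve vendor tags.
    status_t installVendorTags(const vendor_tag_ops_t* vOps) {
        sp<VendorTagDescriptor> desc;
        status_t res = VendorTagDescriptor::createDescriptorFromOps(vOps, /*out*/desc);
        if (res != OK) {
            return res;
        }
        return VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
    }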
diff --git a/include/camera/ICamera.h b/include/camera/android/hardware/ICamera.h
similarity index 73%
rename from include/camera/ICamera.h
rename to include/camera/android/hardware/ICamera.h
index b025735..3b12afe 100644
--- a/include/camera/ICamera.h
+++ b/include/camera/android/hardware/ICamera.h
@@ -21,24 +21,36 @@
 #include <binder/IInterface.h>
 #include <binder/Parcel.h>
 #include <binder/IMemory.h>
+#include <binder/Status.h>
 #include <utils/String8.h>
-#include <camera/Camera.h>
 
 namespace android {
 
-class ICameraClient;
 class IGraphicBufferProducer;
 class Surface;
 
+namespace hardware {
+
+class ICameraClient;
+
 class ICamera: public IInterface
 {
     /**
      * Keep up-to-date with ICamera.aidl in frameworks/base
      */
 public:
+    enum {
+        // Pass real YUV data in video buffers through ICameraClient.dataCallbackTimestamp().
+        VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV = 0,
+        // Pass metadata in video buffers through ICameraClient.dataCallbackTimestamp().
+        VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA = 1,
+        // Pass video buffers through IGraphicBufferProducer set with setVideoTarget().
+        VIDEO_BUFFER_MODE_BUFFER_QUEUE = 2,
+    };
+
     DECLARE_META_INTERFACE(Camera);
 
-    virtual void            disconnect() = 0;
+    virtual binder::Status  disconnect() = 0;
 
     // connect new client with existing camera remote
     virtual status_t        connect(const sp<ICameraClient>& client) = 0;
@@ -82,9 +94,13 @@
     // get recording state
     virtual bool            recordingEnabled() = 0;
 
-    // release a recording frame
+    // Release a recording frame that was received via ICameraClient::dataCallbackTimestamp.
     virtual void            releaseRecordingFrame(const sp<IMemory>& mem) = 0;
 
+    // Release a recording frame handle that was received via
+    // ICameraClient::recordingFrameHandleCallbackTimestamp.
+    virtual void            releaseRecordingFrameHandle(native_handle_t *handle) = 0;
+
     // auto focus
     virtual status_t        autoFocus() = 0;
 
@@ -109,8 +125,16 @@
     // send command to camera driver
     virtual status_t        sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) = 0;
 
-    // tell the camera hal to store meta data or real YUV data in video buffers.
-    virtual status_t        storeMetaDataInBuffers(bool enabled) = 0;
+
+    // Tell camera how to pass video buffers. videoBufferMode is one of VIDEO_BUFFER_MODE_*.
+    // Returns OK if the specified video buffer mode is supported. If videoBufferMode is
+    // VIDEO_BUFFER_MODE_BUFFER_QUEUE, setVideoTarget() must be called before starting video
+    // recording.
+    virtual status_t        setVideoBufferMode(int32_t videoBufferMode) = 0;
+
+    // Set the video buffer producer for camera to use in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
+    virtual status_t        setVideoTarget(
+            const sp<IGraphicBufferProducer>& bufferProducer) = 0;
 };
 
 // ----------------------------------------------------------------------------
@@ -124,6 +148,7 @@
                                     uint32_t flags = 0);
 };
 
-}; // namespace android
+} // namespace hardware
+} // namespace android
 
 #endif
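A hedged sketch of the buffer-mode negotiation that replaces storeMetaDataInBuffers(); `camera` and `producer` are assumed to be connected already:

    #include <android/hardware/ICamera.h>

    using android::sp;
    using android::status_t;
    using android::IGraphicBufferProducer;
    using android::hardware::ICamera;

    // Sketch: opt into BufferQueue-based video buffers (VIDEO_BUFFER_MODE_BUFFER_QUEUE).
    status_t setupBufferQueueRecording(const sp<ICamera>& camera,
            const sp<IGraphicBufferProducer>& producer) {
        status_t res = camera->setVideoBufferMode(ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE);
        if (res != android::OK) {
            return res; // this device does not support BUFFER_QUEUE mode
        }
        // In BUFFER_QUEUE mode the target must be set before startRecording().
        return camera->setVideoTarget(producer);
    }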
diff --git a/include/camera/ICameraClient.h b/include/camera/android/hardware/ICameraClient.h
similarity index 82%
rename from include/camera/ICameraClient.h
rename to include/camera/android/hardware/ICameraClient.h
index 1584dba..3f835a9 100644
--- a/include/camera/ICameraClient.h
+++ b/include/camera/android/hardware/ICameraClient.h
@@ -25,12 +25,10 @@
 #include <system/camera.h>
 
 namespace android {
+namespace hardware {
 
 class ICameraClient: public IInterface
 {
-    /**
-     * Keep up-to-date with ICameraClient.aidl in frameworks/base
-     */
 public:
     DECLARE_META_INTERFACE(CameraClient);
 
@@ -38,6 +36,11 @@
     virtual void            dataCallback(int32_t msgType, const sp<IMemory>& data,
                                          camera_frame_metadata_t *metadata) = 0;
     virtual void            dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& data) = 0;
+
+    // Invoked to send a recording frame handle with a timestamp. Call
+    // ICamera::releaseRecordingFrameHandle to release the frame handle.
+    virtual void            recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
+                                         native_handle_t* handle) = 0;
 };
 
 // ----------------------------------------------------------------------------
@@ -51,6 +54,7 @@
                                     uint32_t flags = 0);
 };
 
-}; // namespace android
+} // namespace hardware
+} // namespace android
 
 #endif
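A sketch of the handle lifecycle implied by the new callback; `processVideoFrame` is an assumed application hook and `camera` the connected remote:

    // Sketch: every handle received through the callback must be handed back via
    // ICamera::releaseRecordingFrameHandle() so the HAL can reuse the buffer.
    void onRecordingFrameHandle(const android::sp<android::hardware::ICamera>& camera,
            nsecs_t timestamp, native_handle_t* handle) {
        processVideoFrame(timestamp, handle); // assumed application hook
        camera->releaseRecordingFrameHandle(handle);
    }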
diff --git a/include/camera/camera2/CaptureRequest.h b/include/camera/camera2/CaptureRequest.h
index eeab217..c989f26 100644
--- a/include/camera/camera2/CaptureRequest.h
+++ b/include/camera/camera2/CaptureRequest.h
@@ -19,15 +19,17 @@
 
 #include <utils/RefBase.h>
 #include <utils/Vector.h>
+#include <binder/Parcelable.h>
 #include <camera/CameraMetadata.h>
 
 namespace android {
 
 class Surface;
 
-struct CaptureRequest : public virtual RefBase {
-public:
+namespace hardware {
+namespace camera2 {
 
+struct CaptureRequest : public Parcelable {
     CameraMetadata          mMetadata;
     Vector<sp<Surface> >    mSurfaceList;
     bool                    mIsReprocess;
@@ -35,9 +37,20 @@
     /**
      * Keep impl up-to-date with CaptureRequest.java in frameworks/base
      */
-    status_t                readFromParcel(Parcel* parcel);
-    status_t                writeToParcel(Parcel* parcel) const;
+    status_t                readFromParcel(const Parcel* parcel) override;
+    status_t                writeToParcel(Parcel* parcel) const override;
 };
-}; // namespace android
+
+} // namespace camera2
+} // namespace hardware
+
+struct CaptureRequest :
+        public RefBase, public hardware::camera2::CaptureRequest {
+  public:
+    // Same as android::hardware::camera2::CaptureRequest, except that you can
+    // put this in an sp<>
+};
+
+} // namespace android
 
 #endif
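A minimal sketch of building an sp<>-managed request with the wrapper above, assuming `settings` and `surface` are supplied by the caller:

    #include <camera/camera2/CaptureRequest.h>

    // Sketch: the wrapper is identical to the parcelable base, but refcounted.
    android::sp<android::CaptureRequest> makeRequest(
            const android::CameraMetadata& settings,
            const android::sp<android::Surface>& surface) {
        android::sp<android::CaptureRequest> request = new android::CaptureRequest();
        request->mMetadata = settings;       // capture settings for this request
        request->mSurfaceList.add(surface);  // output target
        request->mIsReprocess = false;
        return request;
    }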
diff --git a/include/camera/camera2/ICameraDeviceCallbacks.h b/include/camera/camera2/ICameraDeviceCallbacks.h
deleted file mode 100644
index c57b39f..0000000
--- a/include/camera/camera2/ICameraDeviceCallbacks.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
-#define ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <utils/Timers.h>
-#include <system/camera.h>
-
-#include <camera/CaptureResult.h>
-
-namespace android {
-class CameraMetadata;
-
-
-class ICameraDeviceCallbacks : public IInterface
-{
-    /**
-     * Keep up-to-date with ICameraDeviceCallbacks.aidl in frameworks/base
-     */
-public:
-    DECLARE_META_INTERFACE(CameraDeviceCallbacks);
-
-    /**
-     * Error codes for CAMERA_MSG_ERROR
-     */
-    enum CameraErrorCode {
-        ERROR_CAMERA_INVALID_ERROR = -1, // To indicate all invalid error codes
-        ERROR_CAMERA_DISCONNECTED = 0,
-        ERROR_CAMERA_DEVICE = 1,
-        ERROR_CAMERA_SERVICE = 2,
-        ERROR_CAMERA_REQUEST = 3,
-        ERROR_CAMERA_RESULT = 4,
-        ERROR_CAMERA_BUFFER = 5,
-    };
-
-    // One way
-    virtual void            onDeviceError(CameraErrorCode errorCode,
-                                          const CaptureResultExtras& resultExtras) = 0;
-
-    // One way
-    virtual void            onDeviceIdle() = 0;
-
-    // One way
-    virtual void            onCaptureStarted(const CaptureResultExtras& resultExtras,
-                                             int64_t timestamp) = 0;
-
-    // One way
-    virtual void            onResultReceived(const CameraMetadata& metadata,
-                                             const CaptureResultExtras& resultExtras) = 0;
-
-    // One way
-    virtual void            onPrepared(int streamId) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnCameraDeviceCallbacks : public BnInterface<ICameraDeviceCallbacks>
-{
-public:
-    virtual status_t    onTransact( uint32_t code,
-                                    const Parcel& data,
-                                    Parcel* reply,
-                                    uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
deleted file mode 100644
index 4d8eb53..0000000
--- a/include/camera/camera2/ICameraDeviceUser.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
-#define ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
-
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <utils/List.h>
-
-struct camera_metadata;
-
-namespace android {
-
-class ICameraDeviceUserClient;
-class IGraphicBufferProducer;
-class CaptureRequest;
-class CameraMetadata;
-class OutputConfiguration;
-
-enum {
-    NO_IN_FLIGHT_REPEATING_FRAMES = -1,
-};
-
-class ICameraDeviceUser : public IInterface
-{
-    /**
-     * Keep up-to-date with ICameraDeviceUser.aidl in frameworks/base
-     */
-public:
-    DECLARE_META_INTERFACE(CameraDeviceUser);
-
-    virtual void            disconnect() = 0;
-
-    /**
-     * Request Handling
-     **/
-
-    /**
-     * For streaming requests, output lastFrameNumber is the last frame number
-     * of the previous repeating request.
-     * For non-streaming requests, output lastFrameNumber is the expected last
-     * frame number of the current request.
-     */
-    virtual int             submitRequest(sp<CaptureRequest> request,
-                                          bool streaming = false,
-                                          /*out*/
-                                          int64_t* lastFrameNumber = NULL) = 0;
-
-    /**
-     * For streaming requests, output lastFrameNumber is the last frame number
-     * of the previous repeating request.
-     * For non-streaming requests, output lastFrameNumber is the expected last
-     * frame number of the current request.
-     */
-    virtual int             submitRequestList(List<sp<CaptureRequest> > requestList,
-                                              bool streaming = false,
-                                              /*out*/
-                                              int64_t* lastFrameNumber = NULL) = 0;
-
-    /**
-     * Output lastFrameNumber is the last frame number of the previous repeating request.
-     */
-    virtual status_t        cancelRequest(int requestId,
-                                          /*out*/
-                                          int64_t* lastFrameNumber = NULL) = 0;
-
-    /**
-     * Begin the device configuration.
-     *
-     * <p>
-     * beginConfigure must be called before any call to deleteStream, createStream,
-     * or endConfigure.  It is not valid to call this when the device is not idle.
-     * <p>
-     */
-    virtual status_t        beginConfigure() = 0;
-
-    /**
-     * End the device configuration.
-     *
-     * <p>
-     * endConfigure must be called after stream configuration is complete (i.e. after
-     * a call to beginConfigure and subsequent createStream/deleteStream calls).  This
-     * must be called before any requests can be submitted.
-     * <p>
-     */
-    virtual status_t        endConfigure(bool isConstrainedHighSpeed = false) = 0;
-
-    virtual status_t        deleteStream(int streamId) = 0;
-
-    virtual status_t        createStream(const OutputConfiguration& outputConfiguration) = 0;
-
-    /**
-     * Create an input stream of width, height, and format (one of
-     * HAL_PIXEL_FORMAT_*)
-     *
-     * Returns the stream ID if the return value is non-negative, or a status_t
-     * error code if it is negative.
-     */
-    virtual status_t        createInputStream(int width, int height, int format) = 0;
-
-    // get the buffer producer of the input stream
-    virtual status_t        getInputBufferProducer(
-            sp<IGraphicBufferProducer> *producer) = 0;
-
-    // Create a request object from a template.
-    virtual status_t        createDefaultRequest(int templateId,
-                                                 /*out*/
-                                                 CameraMetadata* request) = 0;
-    // Get static camera metadata
-    virtual status_t        getCameraInfo(/*out*/
-                                          CameraMetadata* info) = 0;
-
-    // Wait until all the submitted requests have finished processing
-    virtual status_t        waitUntilIdle() =  0;
-
-    /**
-     * Flush all pending and in-progress work as quickly as possible.
-     * Output lastFrameNumber is the last frame number of the previous repeating request.
-     */
-    virtual status_t        flush(/*out*/
-                                  int64_t* lastFrameNumber = NULL) = 0;
-
-    /**
-     * Preallocate buffers for a given output stream asynchronously.
-     */
-    virtual status_t        prepare(int streamId) = 0;
-
-    /**
-     * Preallocate up to maxCount buffers for a given output stream asynchronously.
-     */
-    virtual status_t        prepare2(int maxCount, int streamId) = 0;
-
-    /**
-     * Free all unused buffers for a given output stream.
-     */
-    virtual status_t        tearDown(int streamId) = 0;
-
-};
-
-// ----------------------------------------------------------------------------
-
-class BnCameraDeviceUser: public BnInterface<ICameraDeviceUser>
-{
-public:
-    virtual status_t    onTransact( uint32_t code,
-                                    const Parcel& data,
-                                    Parcel* reply,
-                                    uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/camera2/OutputConfiguration.h b/include/camera/camera2/OutputConfiguration.h
index 5bcbe15..72a3753 100644
--- a/include/camera/camera2/OutputConfiguration.h
+++ b/include/camera/camera2/OutputConfiguration.h
@@ -17,37 +17,80 @@
 #ifndef ANDROID_HARDWARE_CAMERA2_OUTPUTCONFIGURATION_H
 #define ANDROID_HARDWARE_CAMERA2_OUTPUTCONFIGURATION_H
 
-#include <utils/RefBase.h>
 #include <gui/IGraphicBufferProducer.h>
+#include <binder/Parcelable.h>
 
 namespace android {
 
 class Surface;
 
-class OutputConfiguration : public virtual RefBase {
+namespace hardware {
+namespace camera2 {
+namespace params {
+
+class OutputConfiguration : public android::Parcelable {
 public:
 
     static const int INVALID_ROTATION;
+    static const int INVALID_SET_ID;
     sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
     int                        getRotation() const;
+    int                        getSurfaceSetID() const;
 
     /**
      * Keep impl up-to-date with OutputConfiguration.java in frameworks/base
      */
-    status_t                   writeToParcel(Parcel& parcel) const;
+    virtual status_t           writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t           readFromParcel(const Parcel* parcel) override;
+
+    // getGraphicBufferProducer will be NULL
+    // getRotation will be INVALID_ROTATION
+    // getSurfaceSetID will be INVALID_SET_ID
+    OutputConfiguration();
+
     // getGraphicBufferProducer will be NULL if error occurred
     // getRotation will be INVALID_ROTATION if error occurred
+    // getSurfaceSetID will be INVALID_SET_ID if error occurred
     OutputConfiguration(const Parcel& parcel);
 
-    OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation);
+    OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
+            int surfaceSetID = INVALID_SET_ID);
+
+    bool operator == (const OutputConfiguration& other) const {
+        return (mGbp == other.mGbp &&
+                mRotation == other.mRotation &&
+                mSurfaceSetID == other.mSurfaceSetID);
+    }
+    bool operator != (const OutputConfiguration& other) const {
+        return !(*this == other);
+    }
+    bool operator < (const OutputConfiguration& other) const {
+        if (*this == other) return false;
+        if (mGbp != other.mGbp) return mGbp < other.mGbp;
+        if (mSurfaceSetID != other.mSurfaceSetID) {
+            return mSurfaceSetID < other.mSurfaceSetID;
+        }
+        return mRotation < other.mRotation;
+    }
+    bool operator > (const OutputConfiguration& other) const {
+        return (*this != other && !(*this < other));
+    }
 
 private:
     sp<IGraphicBufferProducer> mGbp;
     int                        mRotation;
-
+    int                        mSurfaceSetID;
     // helper function
-    static String16 readMaybeEmptyString16(const Parcel& parcel);
+    static String16 readMaybeEmptyString16(const Parcel* parcel);
 };
+} // namespace params
+} // namespace camera2
+} // namespace hardware
+
+
+using hardware::camera2::params::OutputConfiguration;
+
 }; // namespace android
 
 #endif
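A sketch of what the new comparison operators enable, assuming `gbp` refers to an output surface's producer: OutputConfiguration can now key ordered containers.

    #include <set>
    #include <camera/camera2/OutputConfiguration.h>

    // Sketch: track already-configured streams in an ordered set (uses operator<).
    void trackConfiguration(std::set<android::OutputConfiguration>& configured,
            android::sp<android::IGraphicBufferProducer>& gbp) {
        android::OutputConfiguration config(gbp, /*rotation*/0);
        if (configured.count(config) == 0) {
            configured.insert(config);
        }
    }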
diff --git a/include/camera/camera2/SubmitInfo.h b/include/camera/camera2/SubmitInfo.h
new file mode 100644
index 0000000..3b47b32
--- /dev/null
+++ b/include/camera/camera2/SubmitInfo.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA2_UTIL_SUBMITINFO_H
+#define ANDROID_HARDWARE_CAMERA2_UTIL_SUBMITINFO_H
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+namespace hardware {
+namespace camera2 {
+namespace utils {
+
+struct SubmitInfo : public android::Parcelable {
+public:
+
+    int32_t mRequestId;
+    int64_t mLastFrameNumber;
+
+    virtual status_t writeToParcel(Parcel *parcel) const override;
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+};
+
+} // namespace utils
+} // namespace camera2
+} // namespace hardware
+} // namespace android
+
+#endif
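A minimal sketch of the Parcel round-trip this Parcelable supports, which is what the AIDL-generated ICameraDeviceUser stubs rely on to return submit results:

    #include <binder/Parcel.h>
    #include <camera/camera2/SubmitInfo.h>

    void submitInfoRoundTrip() {
        using android::hardware::camera2::utils::SubmitInfo;
        SubmitInfo in;
        in.mRequestId = 1;
        in.mLastFrameNumber = 42;

        android::Parcel parcel;
        in.writeToParcel(&parcel);

        parcel.setDataPosition(0); // rewind before reading back
        SubmitInfo out;
        out.readFromParcel(&parcel); // `out` now mirrors `in`
    }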
diff --git a/include/camera/ndk/NdkCameraCaptureSession.h b/include/camera/ndk/NdkCameraCaptureSession.h
new file mode 100644
index 0000000..7b314e9
--- /dev/null
+++ b/include/camera/ndk/NdkCameraCaptureSession.h
@@ -0,0 +1,597 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraCaptureSession.h
+ */
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+#include <android/native_window.h>
+#include "NdkCameraError.h"
+#include "NdkCameraMetadata.h"
+
+#ifndef _NDK_CAMERA_CAPTURE_SESSION_H
+#define _NDK_CAMERA_CAPTURE_SESSION_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * ACameraCaptureSession is an opaque type that manages frame captures of a camera device.
+ *
+ * A pointer can be obtained using {@link ACameraDevice_createCaptureSession} method.
+ */
+typedef struct ACameraCaptureSession ACameraCaptureSession;
+
+/**
+ * The definition of camera capture session state callback.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_stateCallbacks}.
+ * @param session The camera capture session whose state is changing.
+ */
+typedef void (*ACameraCaptureSession_stateCallback)(void* context, ACameraCaptureSession *session);
+
+typedef struct ACameraCaptureSession_stateCallbacks {
+    /// optional application context.
+    void*                               context;
+
+    /**
+     * This callback is called when the session is closed and deleted from memory.
+     *
+     * <p>A session is closed when {@link ACameraCaptureSession_close} is called, a new session
+     * is created by the parent camera device,
+     * or when the parent camera device is closed (either by the user closing the device,
+     * or due to a camera device disconnection or fatal error).</p>
+     *
+     * <p>Once this callback is called, all access to this ACameraCaptureSession object will cause
+     * a crash.</p>
+     */
+    ACameraCaptureSession_stateCallback onClosed;
+
+    /**
+     * This callback is called every time the session has no more capture requests to process.
+     *
+     * <p>This callback will be invoked any time the session finishes processing
+     * all of its active capture requests, and no repeating request or burst is set up.</p>
+     */
+    ACameraCaptureSession_stateCallback onReady;
+
+    /**
+     * This callback is called when the session starts actively processing capture requests.
+     *
+     * <p>If the session runs out of capture requests to process and calls {@link onReady},
+     * then this callback will be invoked again once new requests are submitted for capture.</p>
+     */
+    ACameraCaptureSession_stateCallback onActive;
+} ACameraCaptureSession_stateCallbacks;
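A hedged sketch of filling in the state callbacks; `onSessionClosed`/`onSessionReady` are assumed application functions, and it is assumed here that unused slots may be left null:

    // Sketch: per-session state hooks.
    static void onSessionClosed(void* context, ACameraCaptureSession* session) {
        // Drop any per-session bookkeeping; the session must not be used again.
    }
    static void onSessionReady(void* context, ACameraCaptureSession* session) {
        // No more pending captures; safe to reconfigure or close.
    }

    ACameraCaptureSession_stateCallbacks stateCallbacks = {
        /*context*/  nullptr,
        /*onClosed*/ onSessionClosed,
        /*onReady*/  onSessionReady,
        /*onActive*/ nullptr, // assumed optional here
    };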
+
+/// Enum for describing error reason in {@link ACameraCaptureFailure}
+enum {
+    /**
+     * The capture session has dropped this frame due to an
+     * {@link ACameraCaptureSession_abortCaptures} call.
+     */
+    CAPTURE_FAILURE_REASON_FLUSHED = 0,
+
+    /**
+     * The capture session has dropped this frame due to an error in the framework.
+     */
+    CAPTURE_FAILURE_REASON_ERROR
+};
+
+/// Struct to describe a capture failure
+typedef struct ACameraCaptureFailure {
+    /**
+     * The frame number associated with this failed capture.
+     *
+     * <p>Whenever a request has been processed, regardless of failed capture or success,
+     * it gets a unique frame number assigned to its future result/failed capture.</p>
+     *
+     * <p>This value monotonically increments, starting with 0,
+     * for every new result or failure; and the scope is the lifetime of the
+     * {@link ACameraDevice}.</p>
+     */
+    int64_t frameNumber;
+
+    /**
+     * Determine why the request was dropped, whether due to an error or to a user
+     * action.
+     *
+     * @see CAPTURE_FAILURE_REASON_ERROR
+     * @see CAPTURE_FAILURE_REASON_FLUSHED
+     */
+    int     reason;
+
+    /**
+     * The sequence ID for this failed capture that was returned by the
+     * {@link ACameraCaptureSession_capture} or {@link ACameraCaptureSession_setRepeatingRequest}.
+     *
+     * <p>The sequence ID is a unique monotonically increasing value starting from 0,
+     * incremented every time a new group of requests is submitted to the ACameraDevice.</p>
+     */
+    int     sequenceId;
+
+    /**
+     * Determine if the image was captured from the camera.
+     *
+     * <p>If the image was not captured, no image buffers will be available.
+     * If the image was captured, then image buffers may be available.</p>
+     *
+     */
+    bool    wasImageCaptured;
+} ACameraCaptureFailure;
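A short sketch of a failure handler driven by the fields above; frames flushed by abortCaptures() are expected, anything else is logged:

    #include <inttypes.h>
    #include <stdio.h>

    // Sketch: distinguish deliberate flushes from framework errors.
    static void handleCaptureFailure(const ACameraCaptureFailure* failure) {
        if (failure->reason == CAPTURE_FAILURE_REASON_FLUSHED) {
            return; // dropped by an abortCaptures() call
        }
        fprintf(stderr, "capture failed: frame %" PRId64 ", sequence %d, image captured: %d\n",
                failure->frameNumber, failure->sequenceId, (int)failure->wasImageCaptured);
    }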
+
+/**
+ * The definition of camera capture start callback.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request that is starting. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address differs from what the
+ *                application sent, but the content will match. This request will be freed by the
+ *                framework immediately after this callback returns.
+ * @param timestamp The timestamp when the capture is started. This timestamp will match
+ *                  {@link ACAMERA_SENSOR_TIMESTAMP} of the {@link ACameraMetadata} in
+ *                  {@link ACameraCaptureSession_captureCallbacks#onCaptureCompleted} callback.
+ */
+typedef void (*ACameraCaptureSession_captureCallback_start)(
+        void* context, ACameraCaptureSession* session,
+        const ACaptureRequest* request, int64_t timestamp);
+
+/**
+ * The definition of camera capture progress/result callback.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request of interest. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address differs from what the
+ *                application sent, but the content will match. This request will be freed by the
+ *                framework immediately after this callback returns.
+ * @param result The capture result metadata reported by camera device. The memory is managed by
+ *                camera framework. Do not access this pointer after this callback returns.
+ */
+typedef void (*ACameraCaptureSession_captureCallback_result)(
+        void* context, ACameraCaptureSession* session,
+        ACaptureRequest* request, const ACameraMetadata* result);
+
+/**
+ * The definition of camera capture failure callback.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request of interest. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address differs from what the
+ *                application sent, but the content will match. This request will be freed by the
+ *                framework immediately after this callback returns.
+ * @param failure The {@link ACameraCaptureFailure} describing the capture failure. The memory is
+ *                managed by camera framework. Do not access this pointer after this callback
+ *                returns.
+ */
+typedef void (*ACameraCaptureSession_captureCallback_failed)(
+        void* context, ACameraCaptureSession* session,
+        ACaptureRequest* request, ACameraCaptureFailure* failure);
+
+/**
+ * The definition of camera sequence end callback.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param sequenceId The capture sequence ID of the finished sequence.
+ * @param frameNumber The frame number of the last frame of this sequence.
+ */
+typedef void (*ACameraCaptureSession_captureCallback_sequenceEnd)(
+        void* context, ACameraCaptureSession* session,
+        int sequenceId, int64_t frameNumber);
+
+/**
+ * The definition of camera sequence aborted callback.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param sequenceId The capture sequence ID of the aborted sequence.
+ */
+typedef void (*ACameraCaptureSession_captureCallback_sequenceAbort)(
+        void* context, ACameraCaptureSession* session,
+        int sequenceId);
+
+/**
+ * The definition of camera buffer lost callback.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request of interest. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address differs from what the
+ *                application sent, but the content will match. This request will be freed by the
+ *                framework immediately after this callback returns.
+ * @param window The {@link ANativeWindow} that the lost buffer would have been sent to.
+ * @param frameNumber The frame number of the lost buffer.
+ */
+typedef void (*ACameraCaptureSession_captureCallback_bufferLost)(
+        void* context, ACameraCaptureSession* session,
+        ACaptureRequest* request, ANativeWindow* window, int64_t frameNumber);
+
+typedef struct ACameraCaptureSession_captureCallbacks {
+    /// optional application context.
+    void*                                               context;
+
+    /**
+     * This callback is called when the camera device has started capturing
+     * the output image for the request, at the beginning of image exposure.
+     *
+     * <p>This callback is invoked right as
+     * the capture of a frame begins, so it is the most appropriate time
+     * for playing a shutter sound, or triggering UI indicators of capture.</p>
+     *
+     * <p>The request that is being used for this capture is provided, along
+     * with the actual timestamp for the start of exposure.
+     * This timestamp matches the timestamps that will be
+     * included in {@link ACAMERA_SENSOR_TIMESTAMP} of the {@link ACameraMetadata} in
+     * {@link onCaptureCompleted} callback,
+     * and in the buffers sent to each output ANativeWindow. These buffer
+     * timestamps are accessible through, for example,
+     * {@link AImage_getTimestamp} or
+     * <a href="http://developer.android.com/reference/android/graphics/SurfaceTexture.html#getTimestamp()">
+     * android.graphics.SurfaceTexture#getTimestamp()</a>.</p>
+     *
+     * <p>Note that the ACaptureRequest pointer in the callback will not match what the application
+     * submitted, but the contents of the ACaptureRequest will match what the application submitted.</p>
+     *
+     */
+    ACameraCaptureSession_captureCallback_start         onCaptureStarted;
+
+    /**
+     * This callback is called when an image capture makes partial forward progress; some
+     * (but not all) results from an image capture are available.
+     *
+     * <p>The result provided here will contain some subset of the fields of
+     * a full result. Multiple {@link onCaptureProgressed} calls may happen per
+     * capture; a given result field will only be present in one partial
+     * capture at most. The final {@link onCaptureCompleted} call will always
+     * contain all the fields (in particular, the union of all the fields of all
+     * the partial results composing the total result).</p>
+     *
+     * <p>For each request, some result data might be available earlier than others. The typical
+     * delay between each partial result (per request) is a single frame interval.
+     * For performance-oriented use-cases, applications should query the metadata they need
+     * to make forward progress from the partial results and avoid waiting for the completed
+     * result.</p>
+     *
+     * <p>For a particular request, {@link onCaptureProgressed} may happen before or after
+     * {@link onCaptureStarted}.</p>
+     *
+     * <p>Each request will generate at least one partial result, and at most
+     * {@link ACAMERA_REQUEST_PARTIAL_RESULT_COUNT} partial results.</p>
+     *
+     * <p>Depending on the request settings, the number of partial results per request
+     * will vary, although typically the partial count could be the same as long as the
+     * camera device subsystems enabled stay the same.</p>
+     *
+     * <p>Note that the ACaptureRequest pointer in the callback will not match what the application
+     * submitted, but the contents of the ACaptureRequest will match what the application submitted.</p>
+     */
+    ACameraCaptureSession_captureCallback_result        onCaptureProgressed;
+
+    /**
+     * This callback is called when an image capture has fully completed and all the
+     * result metadata is available.
+     *
+     * <p>This callback will always fire after the last {@link onCaptureProgressed};
+     * in other words, no more partial results will be delivered once the completed result
+     * is available.</p>
+     *
+     * <p>For performance-intensive use-cases where latency is a factor, consider
+     * using {@link onCaptureProgressed} instead.</p>
+     *
+     * <p>Note that the ACaptureRequest pointer in the callback will not match what the application
+     * submitted, but the contents of the ACaptureRequest will match what the application submitted.</p>
+     */
+    ACameraCaptureSession_captureCallback_result        onCaptureCompleted;
+
+    /**
+     * This callback is called instead of {@link onCaptureCompleted} when the
+     * camera device failed to produce a capture result for the
+     * request.
+     *
+     * <p>Other requests are unaffected, and some or all image buffers from
+     * the capture may have been pushed to their respective output
+     * streams.</p>
+     *
+     * <p>Note that the ACaptureRequest pointer in the callback will not match what the application
+     * submitted, but the contents of the ACaptureRequest will match what the application submitted.</p>
+     *
+     * @see ACameraCaptureFailure
+     */
+    ACameraCaptureSession_captureCallback_failed        onCaptureFailed;
+
+    /**
+     * This callback is called independently of the others in {@link ACameraCaptureSession_captureCallbacks},
+     * when a capture sequence finishes and all capture result
+     * or capture failure for it have been returned via this {@link ACameraCaptureSession_captureCallbacks}.
+     *
+     * <p>In total, there will be at least one result/failure returned by this listener
+     * before this callback is invoked. If the capture sequence is aborted before any
+     * requests have been processed, {@link onCaptureSequenceAborted} is invoked instead.</p>
+     */
+    ACameraCaptureSession_captureCallback_sequenceEnd   onCaptureSequenceCompleted;
+
+    /**
+     * This callback is called independently of the others in {@link ACameraCaptureSession_captureCallbacks},
+     * when a capture sequence aborts before any capture result
+     * or capture failure for it have been returned via this {@link ACameraCaptureSession_captureCallbacks}.
+     *
+     * <p>Due to the asynchronous nature of the camera device, not all submitted captures
+     * are immediately processed. It is possible to clear out the pending requests
+     * by a variety of operations such as {@link ACameraCaptureSession_stopRepeating} or
+     * {@link ACameraCaptureSession_abortCaptures}. When such an event happens,
+     * {@link onCaptureSequenceCompleted} will not be called.</p>
+     */
+    ACameraCaptureSession_captureCallback_sequenceAbort onCaptureSequenceAborted;
+
+    /**
+     * This callback is called if a single buffer for a capture could not be sent to its
+     * destination ANativeWindow.
+     *
+     * <p>If the whole capture failed, then {@link onCaptureFailed} will be called instead. If
+     * some but not all buffers were captured but the result metadata will not be available,
+     * then onCaptureFailed will be invoked with {@link ACameraCaptureFailure#wasImageCaptured}
+     * returning true, along with one or more calls to {@link onCaptureBufferLost} for the
+     * failed outputs.</p>
+     *
+     * <p>Note that the ACaptureRequest pointer in the callback will not match what the application
+     * submitted, but the contents of the ACaptureRequest will match what the application submitted.
+     * The ANativeWindow pointer will always match what the application submitted in
+     * {@link ACameraDevice_createCaptureSession}</p>
+     *
+     */
+    ACameraCaptureSession_captureCallback_bufferLost    onCaptureBufferLost;
+} ACameraCaptureSession_captureCallbacks;
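A hedged sketch of a callbacks struct that only consumes completed results; it is assumed that slots the application does not need may be left null:

    static void onCompleted(void* context, ACameraCaptureSession* session,
            ACaptureRequest* request, const ACameraMetadata* result) {
        // Read the metadata of interest here; `result` is only valid during this call.
    }

    ACameraCaptureSession_captureCallbacks captureCallbacks = {
        nullptr,      // context
        nullptr,      // onCaptureStarted
        nullptr,      // onCaptureProgressed
        onCompleted,  // onCaptureCompleted
        nullptr,      // onCaptureFailed
        nullptr,      // onCaptureSequenceCompleted
        nullptr,      // onCaptureSequenceAborted
        nullptr,      // onCaptureBufferLost
    };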
+
+enum {
+    CAPTURE_SEQUENCE_ID_NONE = -1
+};
+
+/**
+ * Close this capture session.
+ *
+ * <p>Closing a session frees up the target output Surfaces of the session for reuse with either
+ * a new session, or to other APIs that can draw to Surfaces.</p>
+ *
+ * <p>Note that creating a new capture session with {@link ACameraDevice_createCaptureSession}
+ * will close any existing capture session automatically, and call the older session listener's
+ * {@link ACameraCaptureSession_stateCallbacks#onClosed} callback. Using
+ * {@link ACameraDevice_createCaptureSession} directly without closing is the recommended approach
+ * for quickly switching to a new session, since unchanged target outputs can be reused more
+ * efficiently.</p>
+ *
+ * <p>After a session is closed and before {@link ACameraCaptureSession_stateCallbacks#onClosed}
+ * is called, all methods invoked on the session will return {@link ACAMERA_ERROR_SESSION_CLOSED},
+ * and any repeating requests are stopped (as if {@link ACameraCaptureSession_stopRepeating} was
+ * called). However, any in-progress capture requests submitted to the session will be completed as
+ * normal; once all captures have completed and the session has been torn down,
+ * {@link ACameraCaptureSession_stateCallbacks#onClosed} callback will be called and the session
+ * will be removed from memory.</p>
+ *
+ * <p>Closing a session is idempotent; closing more than once has no effect.</p>
+ *
+ * @param session the capture session of interest
+ */
+void ACameraCaptureSession_close(ACameraCaptureSession* session);
+
+struct ACameraDevice;
+typedef struct ACameraDevice ACameraDevice;
+
+/**
+ * Get the ACameraDevice pointer associated with this capture session in the device argument
+ * if the method succeeds.
+ *
+ * @param session the capture session of interest
+ * @param device the {@link ACameraDevice} associated with session. Will be set to NULL
+ *        if the session is closed or this method fails.
+ * @return <ul><li>
+ *             {@link ACAMERA_OK} if the method call succeeds. The {@link ACameraDevice}
+ *                                will be stored in device argument</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session or device is NULL</li>
+ *         <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
+ *
+ */
+camera_status_t ACameraCaptureSession_getDevice(
+        ACameraCaptureSession* session, /*out*/ACameraDevice** device);
+
+/**
+ * Submit an array of requests to be captured in sequence as a burst, in the minimum amount of time possible.
+ *
+ * <p>The burst will be captured in the minimum amount of time possible, and will not be
+ * interleaved with requests submitted by other capture or repeat calls.</p>
+ *
+ * <p>Each capture produces one {@link ACameraMetadata} as a capture result and image buffers for
+ * one or more target {@link ANativeWindow}s. The target ANativeWindows (set with
+ * {@link ACaptureRequest_addTarget}) must be a subset of the ANativeWindows provided when
+ * this capture session was created.</p>
+ *
+ * @param session the capture session of interest
+ * @param callbacks the {@link ACameraCaptureSession_captureCallbacks} to be associated with this
+ *        capture sequence. No capture callback will be fired if this is set to NULL.
+ * @param numRequests number of requests in requests argument. Must be at least 1.
+ * @param requests an array of {@link ACaptureRequest} to be captured. Length must be at least
+ *        numRequests.
+ * @param captureSequenceId the capture sequence ID associated with this capture method invocation
+ *        will be stored here if this argument is not NULL and the method call succeeds.
+ *        When this argument is set to NULL, the capture sequence ID will not be returned.
+ *
+ * @return <ul><li>
+ *             {@link ACAMERA_OK} if the method succeeds. captureSequenceId will be filled
+ *             if it is not NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session or requests is NULL, or
+ *             if numRequests < 1</li>
+ *         <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
+ */
+camera_status_t ACameraCaptureSession_capture(
+        ACameraCaptureSession* session,
+        /*optional*/ACameraCaptureSession_captureCallbacks* callbacks,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId);
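A usage sketch for a single-shot burst, reusing `session`, an assumed configured `request`, and the `captureCallbacks` from the earlier sketch:

    int sequenceId = CAPTURE_SEQUENCE_ID_NONE;
    ACaptureRequest* requests[1] = { request };
    camera_status_t status = ACameraCaptureSession_capture(
            session, &captureCallbacks, /*numRequests*/1, requests, &sequenceId);
    if (status != ACAMERA_OK) {
        // e.g. ACAMERA_ERROR_SESSION_CLOSED if the session was already closed
    }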
+
+/**
+ * Request endlessly repeating capture of a sequence of images by this capture session.
+ *
+ * <p>With this method, the camera device will continually capture images,
+ * cycling through the settings in the provided list of
+ * {@link ACaptureRequest}, at the maximum rate possible.</p>
+ *
+ * <p>If a request is submitted through {@link ACameraCaptureSession_capture},
+ * the current repetition of the request list will be
+ * completed before the higher-priority request is handled. This guarantees
+ * that the application always receives a complete repeat burst captured in
+ * minimal time, instead of bursts interleaved with higher-priority
+ * captures, or incomplete captures.</p>
+ *
+ * <p>Repeating burst requests are a simple way for an application to
+ * maintain a preview or other continuous stream of frames where each
+ * request is different in a predictable way, without having to continually
+ * submit requests through {@link ACameraCaptureSession_capture}.</p>
+ *
+ * <p>To stop the repeating capture, call {@link ACameraCaptureSession_stopRepeating}. Any
+ * ongoing burst will still be completed, however. Calling
+ * {@link ACameraCaptureSession_abortCaptures} will also clear the request.</p>
+ *
+ * <p>Calling this method will replace any previously-set repeating request
+ * set up by this method, although any in-progress burst will be completed before the new repeat
+ * burst is used.</p>
+ *
+ * @param session the capture session of interest
+ * @param callbacks the {@link ACameraCaptureSession_captureCallbacks} to be associated with this
+ *        capture sequence. No capture callback will be fired if callbacks is set to NULL.
+ * @param numRequests the number of requests in the requests array. Must be at least 1.
+ * @param requests an array of {@link ACaptureRequest} to be captured. Length must be at least
+ *        numRequests.
+ * @param captureSequenceId the capture sequence ID associated with this capture method invocation
+ *        will be stored here if this argument is not NULL and the method call succeeds.
+ *        When this argument is set to NULL, the capture sequence ID will not be returned.
+ *
+ * @return <ul><li>
+ *             {@link ACAMERA_OK} if the method succeeds. captureSequenceId will be filled
+ *             if it is not NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session or requests is NULL, or
+ *             if numRequests < 1</li>
+ *         <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reason</li></ul>
+ */
+camera_status_t ACameraCaptureSession_setRepeatingRequest(
+        ACameraCaptureSession* session,
+        /*optional*/ACameraCaptureSession_captureCallbacks* callbacks,
+        int numRequests, ACaptureRequest** requests,
+        /*optional*/int* captureSequenceId);
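+
+/*
+ * Usage sketch (illustrative, not part of the API): starting a repeating
+ * preview and later stopping it. Assumes `session` exists and `request` was
+ * created from {@link TEMPLATE_PREVIEW}.
+ *
+ *   ACaptureRequest* requests[1] = { request };
+ *   camera_status_t status = ACameraCaptureSession_setRepeatingRequest(
+ *           session, NULL, 1, requests, NULL);
+ *   if (status == ACAMERA_OK) {
+ *       // ... preview runs until explicitly stopped ...
+ *       ACameraCaptureSession_stopRepeating(session);
+ *   }
+ */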
+
+/**
+ * Cancel any ongoing repeating capture set by {@link ACameraCaptureSession_setRepeatingRequest}.
+ * Has no effect on requests submitted through {@link ACameraCaptureSession_capture}.
+ *
+ * <p>Any currently in-flight captures will still complete, as will any burst that is
+ * mid-capture. To ensure that the device has finished processing all of its capture requests
+ * and is in the ready state, wait for the {@link ACameraCaptureSession_stateCallbacks#onReady} callback
+ * after calling this method.</p>
+ *
+ * @param session the capture session of interest
+ *
+ * @return <ul><li>
+ *             {@link ACAMERA_OK} if the method succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reason</li></ul>
+ */
+camera_status_t ACameraCaptureSession_stopRepeating(ACameraCaptureSession* session);
+
+/**
+ * Discard all captures currently pending and in-progress as fast as possible.
+ *
+ * <p>The camera device will discard all of its current work as fast as possible. Some in-flight
+ * captures may complete successfully and call
+ * {@link ACameraCaptureSession_captureCallbacks#onCaptureCompleted},
+ * while others will trigger their {@link ACameraCaptureSession_captureCallbacks#onCaptureFailed}
+ * callbacks. If a repeating request list is set, it will be cleared.</p>
+ *
+ * <p>This method is the fastest way to switch the camera device to a new session with
+ * {@link ACameraDevice_createCaptureSession}, at the cost of discarding in-progress
+ * work. It must be called before the new session is created. Once all pending requests are
+ * either completed or thrown away, the {@link ACameraCaptureSession_stateCallbacks#onReady}
+ * callback will be called, if the session has not been closed. Otherwise, the
+ * {@link ACameraCaptureSession_stateCallbacks#onClosed}
+ * callback will be fired when a new session is created by the camera device and the previous
+ * session is being removed from memory.</p>
+ *
+ * <p>Cancelling will introduce at least a brief pause in the stream of data from the camera
+ * device, since once the camera device is emptied, the first new request has to make it through
+ * the entire camera pipeline before new output buffers are produced.</p>
+ *
+ * <p>This means that using ACameraCaptureSession_abortCaptures to simply remove pending requests is
+ * not recommended; it's best used for quickly switching output configurations, or for cancelling
+ * long in-progress requests (such as a multi-second capture).</p>
+ *
+ * @param session the capture session of interest
+ *
+ * @return <ul><li>
+ *             {@link ACAMERA_OK} if the method succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reason</li></ul>
+ */
+camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session);
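+
+/*
+ * Usage sketch (illustrative, not part of the API): discarding pending work
+ * before switching to a new session. Assumes `session` is the active session.
+ *
+ *   ACameraCaptureSession_stopRepeating(session);
+ *   ACameraCaptureSession_abortCaptures(session);
+ *   // Wait for the onReady state callback, then create the replacement
+ *   // session with ACameraDevice_createCaptureSession.
+ */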
+
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _NDK_CAMERA_CAPTURE_SESSION_H
+
+/** @} */
diff --git a/include/camera/ndk/NdkCameraDevice.h b/include/camera/ndk/NdkCameraDevice.h
new file mode 100644
index 0000000..9011cb6
--- /dev/null
+++ b/include/camera/ndk/NdkCameraDevice.h
@@ -0,0 +1,668 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraDevice.h
+ */
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+
+#include <android/native_window.h>
+#include "NdkCameraError.h"
+#include "NdkCaptureRequest.h"
+#include "NdkCameraCaptureSession.h"
+
+#ifndef _NDK_CAMERA_DEVICE_H
+#define _NDK_CAMERA_DEVICE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * ACameraDevice is an opaque type that provides access to a camera device.
+ *
+ * A pointer can be obtained using the {@link ACameraManager_openCamera} method.
+ */
+typedef struct ACameraDevice ACameraDevice;
+
+/// Enum for ACameraDevice_ErrorStateCallback error code
+enum {
+    /**
+     * The camera device is in use already.
+     */
+    ERROR_CAMERA_IN_USE = 1,
+
+    /**
+     * The system-wide limit for number of open cameras or camera resources has
+     * been reached, and more camera devices cannot be opened until previous
+     * instances are closed.
+     */
+    ERROR_MAX_CAMERAS_IN_USE = 2,
+
+    /**
+     * The camera is disabled due to a device policy, and cannot be opened.
+     */
+    ERROR_CAMERA_DISABLED = 3,
+
+    /**
+     * The camera device has encountered a fatal error.
+     * <p>The camera device needs to be re-opened to be used again.</p>
+     */
+    ERROR_CAMERA_DEVICE = 4,
+
+    /**
+     * The camera service has encountered a fatal error.
+     * <p>The Android device may need to be shut down and restarted to restore
+     * camera function, or there may be a persistent hardware problem.
+     * An attempt at recovery may be possible by closing the
+     * ACameraDevice and the ACameraManager, and trying to acquire all resources
+     * again from scratch.</p>
+     */
+    ERROR_CAMERA_SERVICE = 5
+};
+
+/**
+ * Camera device state callbacks to be used in {@link ACameraDevice_stateCallbacks}.
+ *
+ * @param context The optional context in {@link ACameraDevice_stateCallbacks} will be
+ *                passed to this callback.
+ * @param device The {@link ACameraDevice} that is being disconnected.
+ */
+typedef void (*ACameraDevice_StateCallback)(void* context, ACameraDevice* device);
+
+/**
+ * Camera device error state callbacks to be used in {@link ACameraDevice_stateCallbacks}.
+ *
+ * @param context The optional context in {@link ACameraDevice_stateCallbacks} will be
+ *                passed to this callback.
+ * @param device The {@link ACameraDevice} that has encountered the error.
+ * @param error The error code describing the cause of this error callback. See the following
+ *              links for more detail.
+ *
+ * @see ERROR_CAMERA_IN_USE
+ * @see ERROR_MAX_CAMERAS_IN_USE
+ * @see ERROR_CAMERA_DISABLED
+ * @see ERROR_CAMERA_DEVICE
+ * @see ERROR_CAMERA_SERVICE
+ */
+typedef void (*ACameraDevice_ErrorStateCallback)(void* context, ACameraDevice* device, int error);
+
+typedef struct ACameraDevice_StateCallbacks {
+    /// optional application context.
+    void*                             context;
+
+    /**
+     * The function called when a camera device is no longer available for use.
+     *
+     * <p>Any attempt to call API methods on this ACameraDevice will return
+     * {@link ACAMERA_ERROR_CAMERA_DISCONNECTED}. The disconnection could be due to a
+     * change in security policy or permissions; the physical disconnection
+     * of a removable camera device; or the camera being needed for a
+     * higher-priority camera API client.</p>
+     *
+     * <p>The application should clean up the camera with {@link ACameraDevice_close} after
+     * this happens, as it is not recoverable until the camera can be opened
+     * again.</p>
+     *
+     */
+    ACameraDevice_StateCallback       onDisconnected;
+
+    /**
+     * The function called when a camera device has encountered a serious error.
+     *
+     * <p>This indicates a failure of the camera device or camera service in some way.
+     * Any attempt to call API methods on this ACameraDevice in the future will return
+     * {@link ACAMERA_ERROR_CAMERA_DISCONNECTED}.</p>
+     *
+     * <p>There may still be capture completion or camera stream callbacks that will be called
+     * after this error is received.</p>
+     *
+     * <p>The application should clean up the camera with {@link ACameraDevice_close} after this
+     * happens. Further attempts at recovery are error-code specific.</p>
+     *
+     */
+    ACameraDevice_ErrorStateCallback  onError;
+} ACameraDevice_stateCallbacks;
+
+/**
+ * Close the connection and free this ACameraDevice synchronously. Access to the ACameraDevice
+ * after calling this method will cause a crash.
+ *
+ * <p>After this call, all calls to the active ACameraCaptureSession associated with this
+ * ACameraDevice will return {@link ACAMERA_ERROR_SESSION_CLOSED} except for calls to
+ * {@link ACameraCaptureSession_close}.</p>
+ *
+ * <p>This method will stop all repeating captures sent via
+ * {@link ACameraCaptureSession_setRepeatingRequest} and block until all capture requests sent via
+ * {@link ACameraCaptureSession_capture} are complete. Once the method returns, the camera device
+ * will be removed from memory and access to the closed camera device pointer will cause a crash.</p>
+ *
+ * @param device the camera device to be closed
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device is NULL.</li></ul>
+ */
+camera_status_t ACameraDevice_close(ACameraDevice* device);
+
+/**
+ * Return the camera id associated with this camera device.
+ *
+ * @param device the camera device of interest
+ *
+ * @return camera ID string. The returned string is managed by the framework and must not be
+ * deleted or freed by the application. Also the returned string must not be used after the device
+ * has been closed.
+ */
+const char* ACameraDevice_getId(const ACameraDevice* device);
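+
+/*
+ * Usage sketch (illustrative, not part of the API): reading the camera ID and
+ * then shutting the device down. Assumes `device` is an open ACameraDevice.
+ *
+ *   const char* id = ACameraDevice_getId(device);
+ *   // Use `id` only while the device remains open.
+ *   ACameraDevice_close(device);  // `device` and `id` are now invalid
+ */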
+
+typedef enum {
+    /**
+     * Create a request suitable for a camera preview window. Specifically, this
+     * means that high frame rate is given priority over the highest-quality
+     * post-processing. These requests would normally be used with the
+     * {@link ACameraCaptureSession_setRepeatingRequest} method.
+     * This template is guaranteed to be supported on all camera devices.
+     *
+     * @see ACameraDevice_createCaptureRequest
+     */
+    TEMPLATE_PREVIEW = 1,
+
+    /**
+     * Create a request suitable for still image capture. Specifically, this
+     * means prioritizing image quality over frame rate. These requests would
+     * commonly be used with the {@link ACameraCaptureSession_capture} method.
+     * This template is guaranteed to be supported on all camera devices.
+     *
+     * @see ACameraDevice_createCaptureRequest
+     */
+    TEMPLATE_STILL_CAPTURE = 2,
+
+    /**
+     * Create a request suitable for video recording. Specifically, this means
+     * that a stable frame rate is used, and post-processing is set for
+     * recording quality. These requests would commonly be used with the
+     * {@link ACameraCaptureSession_setRepeatingRequest} method.
+     * This template is guaranteed to be supported on all camera devices.
+     *
+     * @see ACameraDevice_createCaptureRequest
+     */
+    TEMPLATE_RECORD = 3,
+
+    /**
+     * Create a request suitable for still image capture while recording
+     * video. Specifically, this means maximizing image quality without
+     * disrupting the ongoing recording. These requests would commonly be used
+     * with the {@link ACameraCaptureSession_capture} method while a request based on
+     * {@link TEMPLATE_RECORD} is in use with {@link ACameraCaptureSession_setRepeatingRequest}.
+     * This template is guaranteed to be supported on all camera devices.
+     *
+     * @see ACameraDevice_createCaptureRequest
+     */
+    TEMPLATE_VIDEO_SNAPSHOT = 4,
+
+    /**
+     * Create a request suitable for zero shutter lag still capture. This means
+     * maximizing image quality without compromising preview frame rate.
+     * AE/AWB/AF should be on auto mode.
+     *
+     * @see ACameraDevice_createCaptureRequest
+     */
+    TEMPLATE_ZERO_SHUTTER_LAG = 5,
+
+    /**
+     * A basic template for direct application control of capture
+     * parameters. All automatic control is disabled (auto-exposure, auto-white
+     * balance, auto-focus), and post-processing parameters are set to preview
+     * quality. The manual capture parameters (exposure, sensitivity, and so on)
+     * are set to reasonable defaults, but should be overridden by the
+     * application depending on the intended use case.
+     * This template is guaranteed to be supported on camera devices that support the
+     * {@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR} capability.
+     *
+     * @see ACameraDevice_createCaptureRequest
+     */
+    TEMPLATE_MANUAL = 6,
+} ACameraDevice_request_template;
+
+/**
+ * Create an ACaptureRequest for capturing images, initialized with a template
+ * for a target use case.
+ *
+ * <p>The settings are chosen to be the best options for this camera device,
+ * so it is not recommended to reuse the same request for a different camera device.</p>
+ *
+ * @param device the camera device of interest
+ * @param templateId the type of capture request to be created.
+ *        See {@link ACameraDevice_request_template}.
+ * @param request the output request will be stored here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds. The created capture request will be
+ *                                filled in the request argument.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device or request is NULL, templateId
+ *                                is undefined, or the camera device does not support the requested
+ *                                template.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error.</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reason.</li></ul>
+ *
+ * @see TEMPLATE_PREVIEW
+ * @see TEMPLATE_RECORD
+ * @see TEMPLATE_STILL_CAPTURE
+ * @see TEMPLATE_VIDEO_SNAPSHOT
+ * @see TEMPLATE_MANUAL
+ */
+camera_status_t ACameraDevice_createCaptureRequest(
+        const ACameraDevice* device, ACameraDevice_request_template templateId,
+        /*out*/ACaptureRequest** request);
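+
+/*
+ * Usage sketch (illustrative, not part of the API): creating a preview
+ * request. Assumes `device` is an open ACameraDevice.
+ *
+ *   ACaptureRequest* request = NULL;
+ *   camera_status_t status = ACameraDevice_createCaptureRequest(
+ *           device, TEMPLATE_PREVIEW, &request);
+ *   if (status != ACAMERA_OK) {
+ *       // e.g. ACAMERA_ERROR_INVALID_PARAMETER for an unsupported template.
+ *   }
+ *   // Add output targets with ACaptureRequest_addTarget (declared in
+ *   // NdkCaptureRequest.h) before submitting the request.
+ */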
+
+
+typedef struct ACaptureSessionOutputContainer ACaptureSessionOutputContainer;
+
+typedef struct ACaptureSessionOutput ACaptureSessionOutput;
+
+/**
+ * Create a capture session output container.
+ *
+ * <p>The container is used in the {@link ACameraDevice_createCaptureSession} method to create a
+ * capture session. Use {@link ACaptureSessionOutputContainer_free} to free the container and its
+ * memory after the application no longer needs the ACaptureSessionOutputContainer.</p>
+ *
+ * @param container the output {@link ACaptureSessionOutputContainer} will be stored here if the
+ *                  method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds. The created container will be
+ *                                filled in the container argument.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if container is NULL.</li></ul>
+ */
+camera_status_t ACaptureSessionOutputContainer_create(
+        /*out*/ACaptureSessionOutputContainer** container);
+
+/**
+ * Free a capture session output container.
+ *
+ * @param container the {@link ACaptureSessionOutputContainer} to be freed.
+ *
+ * @see ACaptureSessionOutputContainer_create
+ */
+void            ACaptureSessionOutputContainer_free(ACaptureSessionOutputContainer* container);
+
+/**
+ * Create an ACaptureSessionOutput object.
+ *
+ * <p>The ACaptureSessionOutput is used in the {@link ACaptureSessionOutputContainer_add} method
+ * to add an output {@link ANativeWindow} to ACaptureSessionOutputContainer. Use
+ * {@link ACaptureSessionOutput_free} to free the object and its memory after the application no
+ * longer needs the {@link ACaptureSessionOutput}.</p>
+ *
+ * @param anw the {@link ANativeWindow} to be associated with the {@link ACaptureSessionOutput}
+ * @param output the output {@link ACaptureSessionOutput} will be stored here if the
+ *                  method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds. The created
+ *                                {@link ACaptureSessionOutput} will be stored in the output argument.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if anw or output is NULL.</li></ul>
+ *
+ * @see ACaptureSessionOutputContainer_add
+ */
+camera_status_t ACaptureSessionOutput_create(
+        ANativeWindow* anw, /*out*/ACaptureSessionOutput** output);
+
+/**
+ * Free an ACaptureSessionOutput object.
+ *
+ * @param output the {@link ACaptureSessionOutput} to be freed.
+ *
+ * @see ACaptureSessionOutput_create
+ */
+void            ACaptureSessionOutput_free(ACaptureSessionOutput* output);
+
+/**
+ * Add an {@link ACaptureSessionOutput} object to {@link ACaptureSessionOutputContainer}.
+ *
+ * @param container the {@link ACaptureSessionOutputContainer} of interest.
+ * @param output the output {@link ACaptureSessionOutput} to be added to container.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if container or output is NULL.</li></ul>
+ */
+camera_status_t ACaptureSessionOutputContainer_add(
+        ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output);
+
+/**
+ * Remove an {@link ACaptureSessionOutput} object from {@link ACaptureSessionOutputContainer}.
+ *
+ * <p>This method has no effect if the ACaptureSessionOutput does not exist in
+ * ACaptureSessionOutputContainer.</p>
+ *
+ * @param container the {@link ACaptureSessionOutputContainer} of interest.
+ * @param output the output {@link ACaptureSessionOutput} to be removed from container.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if container or output is NULL.</li></ul>
+ */
+camera_status_t ACaptureSessionOutputContainer_remove(
+        ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output);
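+
+/*
+ * Usage sketch (illustrative, not part of the API): assembling an output
+ * container with a single ANativeWindow target. Assumes `window` is a valid
+ * ANativeWindow, for example one obtained via ANativeWindow_fromSurface.
+ *
+ *   ACaptureSessionOutputContainer* container = NULL;
+ *   ACaptureSessionOutput* output = NULL;
+ *   if (ACaptureSessionOutputContainer_create(&container) == ACAMERA_OK &&
+ *           ACaptureSessionOutput_create(window, &output) == ACAMERA_OK) {
+ *       ACaptureSessionOutputContainer_add(container, output);
+ *       // ... pass `container` to ACameraDevice_createCaptureSession ...
+ *   }
+ *   // Release with ACaptureSessionOutput_free and
+ *   // ACaptureSessionOutputContainer_free once no longer needed.
+ */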
+
+/**
+ * Create a new camera capture session by providing the target output set of {@link ANativeWindow}
+ * to the camera device.
+ *
+ * <p>If there is a preexisting session, the previous session will be closed
+ * automatically. However, the application still needs to call {@link ACameraCaptureSession_close}
+ * on the previous session. Otherwise the resources held by the previous session will NOT be freed.</p>
+ *
+ * <p>The active capture session determines the set of potential output {@link ANativeWindow}s for
+ * the camera device for each capture request. A given request may use all
+ * or only some of the outputs. Once the ACameraCaptureSession is created, requests can be
+ * submitted with {@link ACameraCaptureSession_capture} or
+ * {@link ACameraCaptureSession_setRepeatingRequest}.</p>
+ *
+ * <p>Often the {@link ANativeWindow} used with this method can be obtained from a <a href=
+ * "http://developer.android.com/reference/android/view/Surface.html">Surface</a> java object using
+ * the {@link ANativeWindow_fromSurface} NDK method. Surfaces or ANativeWindows suitable for
+ * inclusion as a camera output can be created for various use cases and targets:</p>
+ *
+ * <ul>
+ *
+ * <li>For drawing to a
+ *   <a href="http://developer.android.com/reference/android/view/SurfaceView.html">SurfaceView</a>:
+ *   Once the SurfaceView's Surface is created, set the size
+ *   of the Surface with
+ *   <a href="http://developer.android.com/reference/android/view/SurfaceHolder.html#setFixedSize(int, int)">
+ *    android.view.SurfaceHolder\#setFixedSize</a> to be one of the PRIVATE output sizes
+ *   returned by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}
+ *   and then obtain the Surface by calling <a href=
+ *   "http://developer.android.com/reference/android/view/SurfaceHolder.html#getSurface()">
+ *   android.view.SurfaceHolder\#getSurface</a>. If the size is not set by the application, it will
+ *   be rounded to the nearest supported size less than 1080p, by the camera device.</li>
+ *
+ * <li>For accessing through an OpenGL texture via a <a href=
+ *   "http://developer.android.com/reference/android/graphics/SurfaceTexture.html">SurfaceTexture</a>:
+ *   Set the size of the SurfaceTexture with <a href=
+ *   "http://developer.android.com/reference/android/graphics/SurfaceTexture.html#setDefaultBufferSize(int, int)">
+ *   setDefaultBufferSize</a> to be one of the PRIVATE output sizes
+ *   returned by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}
+ *   before creating a Surface from the SurfaceTexture with <a href=
+ *   "http://developer.android.com/reference/android/view/Surface.html#Surface(android.graphics.SurfaceTexture)">
+ *   Surface\#Surface(SurfaceTexture)</a>. If the size is not set by the application, it will be set to the
+ *   smallest supported size less than 1080p, by the camera device.</li>
+ *
+ * <li>For recording with <a href=
+ *     "http://developer.android.com/reference/android/media/MediaCodec.html">
+ *     MediaCodec</a>: Call
+ *   <a href=
+ *     "http://developer.android.com/reference/android/media/MediaCodec.html#createInputSurface()">
+ *     android.media.MediaCodec\#createInputSurface</a> after configuring
+ *   the media codec to use one of the PRIVATE output sizes
+ *   returned by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}.
+ *   </li>
+ *
+ * <li>For recording with <a href=
+ *    "http://developer.android.com/reference/android/media/MediaRecorder.html">
+ *    MediaRecorder</a>: Call
+ *   <a href="http://developer.android.com/reference/android/media/MediaRecorder.html#getSurface()">
+ *    android.media.MediaRecorder\#getSurface</a> after configuring the media recorder to use
+ *   one of the PRIVATE output sizes returned by
+ *   {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}, or configuring it to use one of the supported
+ *   <a href="http://developer.android.com/reference/android/media/CamcorderProfile.html">
+ *    CamcorderProfiles</a>.</li>
+ *
+ * <li>For efficient YUV processing with <a href=
+ *   "http://developer.android.com/reference/android/renderscript/package-summary.html">
+ *   RenderScript</a>:
+ *   Create a RenderScript
+ *   <a href="http://developer.android.com/reference/android/renderscript/Allocation.html">
+ *   Allocation</a> with a supported YUV
+ *   type, the IO_INPUT flag, and one of the YUV output sizes returned by
+ *   {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}.
+ *   Then obtain the Surface with
+ *   <a href="http://developer.android.com/reference/android/renderscript/Allocation.html#getSurface()">
+ *   Allocation\#getSurface</a>.</li>
+ *
+ * <li>For access to RAW, uncompressed YUV, or compressed JPEG data in the application: Create an
+ *   {@link AImageReader} object using the {@link AImageReader_new} method with one of the supported
+ *   output formats given by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}. Then obtain an
+ *   ANativeWindow from it with {@link AImageReader_getWindow}.
+ *   If the AImageReader size is not set to a supported size, it will be rounded to a supported
+ *   size less than 1080p by the camera device.
+ *   </li>
+ *
+ * </ul>
+ *
+ * <p>The camera device will query each ANativeWindow's size and formats upon this
+ * call, so they must be set to a valid setting at this time.</p>
+ *
+ * <p>It can take several hundred milliseconds for the session's configuration to complete,
+ * since camera hardware may need to be powered on or reconfigured.</p>
+ *
+ * <p>If a prior ACameraCaptureSession already exists when this method is called, the previous
+ * session will no longer be able to accept new capture requests and will be closed. Any
+ * in-progress capture requests made on the prior session will be completed before it's closed.
+ * To minimize the transition time,
+ * the ACameraCaptureSession_abortCaptures method can be used to discard the remaining
+ * requests for the prior capture session before a new one is created. Note that once the new
+ * session is created, the old one can no longer have its captures aborted.</p>
+ *
+ * <p>Using larger resolution outputs, or more outputs, can result in slower
+ * output rate from the device.</p>
+ *
+ * <p>Configuring a session with an empty list will close the current session, if
+ * any. This can be used to release the current session's target surfaces for another use.</p>
+ *
+ * <p>While any of the sizes from {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS} can be used when
+ * a single output stream is configured, a given camera device may not be able to support all
+ * combinations of sizes, formats, and targets when multiple outputs are configured at once. The
+ * tables below list the maximum guaranteed resolutions for combinations of streams and targets,
+ * given the capabilities of the camera device.</p>
+ *
+ * <p>If an application tries to create a session using a set of targets that exceed the limits
+ * described in the below tables, one of three possibilities may occur. First, the session may
+ * be successfully created and work normally. Second, the session may be successfully created,
+ * but the camera device won't meet the frame rate guarantees as described in
+ * {@link ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS}. Or third, if the output set
+ * cannot be used at all, session creation will fail entirely, with
+ * {@link ACAMERA_ERROR_STREAM_CONFIGURE_FAIL} being returned.</p>
+ *
+ * <p>For the type column, `PRIV` refers to output format {@link AIMAGE_FORMAT_PRIVATE},
+ * `YUV` refers to output format {@link AIMAGE_FORMAT_YUV_420_888},
+ * `JPEG` refers to output format {@link AIMAGE_FORMAT_JPEG},
+ * and `RAW` refers to output format {@link AIMAGE_FORMAT_RAW16}.</p>
+ *
+ * <p>For the maximum size column, `PREVIEW` refers to the best size match to the
+ * device's screen resolution, or to 1080p `(1920x1080)`, whichever is
+ * smaller. `RECORD` refers to the camera device's maximum supported recording resolution,
+ * as determined by <a href="http://developer.android.com/reference/android/media/CamcorderProfile.html">
+ * android.media.CamcorderProfiles</a>. And `MAXIMUM` refers to the
+ * camera device's maximum output resolution for that format or target from
+ * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}.</p>
+ *
+ * <p>To use these tables, determine the number and the formats/targets of outputs needed, and
+ * find the row(s) of the table with those targets. The sizes indicate the maximum set of sizes
+ * that can be used; it is guaranteed that for those targets, the listed sizes and anything
+ * smaller from the list given by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS} can be
+ * successfully used to create a session.  For example, if a row indicates that an 8 megapixel
+ * (MP) YUV_420_888 output can be used together with a 2 MP `PRIV` output, then a session
+ * can be created with targets `[8 MP YUV, 2 MP PRIV]` or targets `[2 MP YUV, 2 MP PRIV]`;
+ * but a session with targets `[8 MP YUV, 4 MP PRIV]`, targets `[4 MP YUV, 4 MP PRIV]`,
+ * or targets `[8 MP PRIV, 2 MP YUV]` would not be guaranteed to work, unless
+ * some other row of the table lists such a combination.</p>
+ *
+ * <p>Legacy devices ({@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL}
+ * `== `{@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY LEGACY}) support at
+ * least the following stream combinations:
+ *
+ * <table>
+ * <tr><th colspan="7">LEGACY-level guaranteed configurations</th></tr>
+ * <tr> <th colspan="2" id="rb">Target 1</th> <th colspan="2" id="rb">Target 2</th>  <th colspan="2" id="rb">Target 3</th> <th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr> <th>Type</th><th id="rb">Max size</th> <th>Type</th><th id="rb">Max size</th> <th>Type</th><th id="rb">Max size</th></tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td colspan="2" id="rb"></td> <td>Simple preview, GPU video processing, or no-preview video recording.</td> </tr>
+ * <tr> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td colspan="2" id="rb"></td> <td>No-viewfinder still image capture.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td colspan="2" id="rb"></td> <td>In-application video/image processing.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>Standard still imaging.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>In-app processing plus still capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td colspan="2" id="rb"></td> <td>Standard recording.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td colspan="2" id="rb"></td> <td>Preview plus in-app processing.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>Still capture plus in-app processing.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>Limited-level ({@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL}
+ * `== `{@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED LIMITED}) devices
+ * support at least the following stream combinations in addition to those for
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY LEGACY} devices:
+ *
+ * <table>
+ * <tr><th colspan="7">LIMITED-level additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th colspan="2" id="rb">Target 3</th> <th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th></tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`RECORD `</td> <td colspan="2" id="rb"></td> <td>High-resolution video recording with preview.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`RECORD `</td> <td colspan="2" id="rb"></td> <td>High-resolution in-app video processing with preview.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`RECORD `</td> <td colspan="2" id="rb"></td> <td>Two-input in-app video processing.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`RECORD `</td> <td>`JPEG`</td><td id="rb">`RECORD `</td> <td>High-resolution recording with video snapshot.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`RECORD `</td> <td>`JPEG`</td><td id="rb">`RECORD `</td> <td>High-resolution in-app processing with video snapshot.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>Two-input in-app processing with still capture.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>FULL-level ({@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL}
+ * `== `{@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_FULL FULL}) devices
+ * support at least the following stream combinations in addition to those for
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED LIMITED} devices:
+ *
+ * <table>
+ * <tr><th colspan="7">FULL-level additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th colspan="2" id="rb">Target 3</th> <th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>Maximum-resolution GPU processing with preview.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>Maximum-resolution in-app processing with preview.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>Maximum-resolution two-input in-app processing.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>Video recording with maximum-size video snapshot</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`640x480`</td> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Standard video recording plus maximum-resolution in-app processing.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`640x480`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Preview plus two-input maximum-resolution in-app processing.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>RAW-capability ({@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES} includes
+ * {@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_RAW RAW}) devices additionally support
+ * at least the following stream combinations on both
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_FULL FULL} and
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED LIMITED} devices:
+ *
+ * <table>
+ * <tr><th colspan="7">RAW-capability additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th colspan="2" id="rb">Target 3</th> <th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th> </tr>
+ * <tr> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td colspan="2" id="rb"></td> <td>No-preview DNG capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>Standard DNG capture.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>In-app processing plus DNG capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>Video recording with DNG capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>Preview with in-app processing and DNG capture.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>Two-input in-app processing plus DNG capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>Still capture with simultaneous JPEG and DNG.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>In-app processing with simultaneous JPEG and DNG.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>BURST-capability ({@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES} includes
+ * {@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE}) devices
+ * support at least the below stream combinations in addition to those for
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED LIMITED} devices. Note that all
+ * FULL-level devices support the BURST capability, and the below list is a strict subset of the
+ * list for FULL-level devices, so this table is only relevant for LIMITED-level devices that
+ * support the BURST_CAPTURE capability.
+ *
+ * <table>
+ * <tr><th colspan="5">BURST-capability additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`MAXIMUM`</td> <td>Maximum-resolution GPU processing with preview.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Maximum-resolution in-app processing with preview.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Maximum-resolution two-input in-app processing.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>LEVEL-3 ({@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL}
+ * `== `{@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_3 LEVEL_3}) devices
+ * support at least the following stream combinations in addition to the combinations for
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_FULL FULL} and for
+ * RAW capability ({@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES} includes
+ * {@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_RAW RAW}):
+ *
+ * <table>
+ * <tr><th colspan="11">LEVEL-3 additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th colspan="2" id="rb">Target 3</th><th colspan="2" id="rb">Target 4</th><th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`640x480`</td> <td>`YUV`</td><td id="rb">`MAXIMUM`</td> <td>`RAW`</td><td id="rb">`MAXIMUM`</td> <td>In-app viewfinder analysis with dynamic selection of output format.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`640x480`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>`RAW`</td><td id="rb">`MAXIMUM`</td> <td>In-app viewfinder analysis with dynamic selection of output format.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>Since the capabilities of camera devices vary greatly, a given camera device may support
+ * target combinations with sizes outside of these guarantees, but this can only be tested for
+ * by attempting to create a session with such targets.</p>
+ *
+ * @param device the camera device of interest.
+ * @param outputs the {@link ACaptureSessionOutputContainer} describing all output streams.
+ * @param callbacks the {@link ACameraCaptureSession_stateCallbacks capture session state callbacks}.
+ * @param session the created {@link ACameraCaptureSession} will be filled here if the method call
+ *        succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds. The created capture session will be
+ *                                filled in the session argument.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if any of device, outputs, callbacks or
+ *                                session is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error.</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reason.</li></ul>
+ */
+camera_status_t ACameraDevice_createCaptureSession(
+        ACameraDevice* device,
+        const ACaptureSessionOutputContainer*       outputs,
+        const ACameraCaptureSession_stateCallbacks* callbacks,
+        /*out*/ACameraCaptureSession** session);
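+
+/*
+ * Usage sketch (illustrative, not part of the API): creating a session from a
+ * prepared output container. Assumes `device` is open, `container` was built
+ * as shown above, and `stateCallbacks` is an
+ * ACameraCaptureSession_stateCallbacks struct whose callback fields (such as
+ * onReady and onClosed) have been filled in by the application.
+ *
+ *   ACameraCaptureSession* session = NULL;
+ *   camera_status_t status = ACameraDevice_createCaptureSession(
+ *           device, container, &stateCallbacks, &session);
+ *   if (status == ACAMERA_OK) {
+ *       // `session` can now be used with ACameraCaptureSession_capture or
+ *       // ACameraCaptureSession_setRepeatingRequest.
+ *   }
+ */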
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _NDK_CAMERA_DEVICE_H
+
+/** @} */
+
diff --git a/include/camera/ndk/NdkCameraError.h b/include/camera/ndk/NdkCameraError.h
new file mode 100644
index 0000000..36251fc
--- /dev/null
+++ b/include/camera/ndk/NdkCameraError.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraError.h
+ */
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+
+#ifndef _NDK_CAMERA_ERROR_H
+#define _NDK_CAMERA_ERROR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+    ACAMERA_OK = 0,
+
+    ACAMERA_ERROR_BASE                  = -10000,
+
+    /**
+     * Camera operation has failed due to an unspecified cause.
+     */
+    ACAMERA_ERROR_UNKNOWN               = ACAMERA_ERROR_BASE,
+
+    /**
+     * Camera operation has failed due to an invalid parameter being passed to the method.
+     */
+    ACAMERA_ERROR_INVALID_PARAMETER     = ACAMERA_ERROR_BASE - 1,
+
+    /**
+     * Camera operation has failed because the camera device has been closed, possibly because a
+     * higher-priority client has taken ownership of the camera device.
+     */
+    ACAMERA_ERROR_CAMERA_DISCONNECTED   = ACAMERA_ERROR_BASE - 2,
+
+    /**
+     * Camera operation has failed due to insufficient memory.
+     */
+    ACAMERA_ERROR_NOT_ENOUGH_MEMORY     = ACAMERA_ERROR_BASE - 3,
+
+    /**
+     * Camera operation has failed because the requested metadata tag cannot be found in the input
+     * {@link ACameraMetadata} or {@link ACaptureRequest}.
+     */
+    ACAMERA_ERROR_METADATA_NOT_FOUND    = ACAMERA_ERROR_BASE - 4,
+
+    /**
+     * Camera operation has failed and the camera device has encountered a fatal error and needs to
+     * be re-opened before it can be used again.
+     */
+    ACAMERA_ERROR_CAMERA_DEVICE         = ACAMERA_ERROR_BASE - 5,
+
+    /**
+     * Camera operation has failed and the camera service has encountered a fatal error.
+     *
+     * <p>The Android device may need to be shut down and restarted to restore
+     * camera function, or there may be a persistent hardware problem.</p>
+     *
+     * <p>An attempt at recovery may be possible by closing the
+     * ACameraDevice and the ACameraManager, and trying to acquire all resources
+     * again from scratch.</p>
+     */
+    ACAMERA_ERROR_CAMERA_SERVICE        = ACAMERA_ERROR_BASE - 6,
+
+    /**
+     * The {@link ACameraCaptureSession} has been closed and cannot perform any operation other
+     * than {@link ACameraCaptureSession_close}.
+     */
+    ACAMERA_ERROR_SESSION_CLOSED        = ACAMERA_ERROR_BASE - 7,
+
+    /**
+     * Camera operation has failed due to an invalid internal operation. Usually this is due to a
+     * low-level problem that may resolve itself on retry.
+     */
+    ACAMERA_ERROR_INVALID_OPERATION     = ACAMERA_ERROR_BASE - 8,
+
+    /**
+     * Camera device does not support the stream configuration provided by application in
+     * {@link ACameraDevice_createCaptureSession}.
+     */
+    ACAMERA_ERROR_STREAM_CONFIGURE_FAIL = ACAMERA_ERROR_BASE - 9,
+
+    /**
+     * Camera device is being used by another higher priority camera API client.
+     */
+    ACAMERA_ERROR_CAMERA_IN_USE         = ACAMERA_ERROR_BASE - 10,
+
+    /**
+     * The system-wide limit for number of open cameras or camera resources has been reached, and
+     * more camera devices cannot be opened until previous instances are closed.
+     */
+    ACAMERA_ERROR_MAX_CAMERA_IN_USE     = ACAMERA_ERROR_BASE - 11,
+
+    /**
+     * The camera is disabled due to a device policy, and cannot be opened.
+     */
+    ACAMERA_ERROR_CAMERA_DISABLED       = ACAMERA_ERROR_BASE - 12,
+
+    /**
+     * The application does not have permission to open the camera.
+     */
+    ACAMERA_ERROR_PERMISSION_DENIED     = ACAMERA_ERROR_BASE - 13,
+} camera_status_t;
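+
+/*
+ * Usage sketch (illustrative, not part of the API): a typical check of a
+ * camera_status_t return value, retrying only where the documentation above
+ * permits it.
+ *
+ *   camera_status_t status = ...;  // any NDK camera call
+ *   if (status == ACAMERA_ERROR_INVALID_OPERATION) {
+ *       // Low-level issue that may resolve itself; a retry is reasonable.
+ *   } else if (status != ACAMERA_OK) {
+ *       // Treat any other error according to its documentation above.
+ *   }
+ */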
+
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _NDK_CAMERA_ERROR_H
+
+/** @} */
diff --git a/include/camera/ndk/NdkCameraManager.h b/include/camera/ndk/NdkCameraManager.h
new file mode 100644
index 0000000..9188e94
--- /dev/null
+++ b/include/camera/ndk/NdkCameraManager.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraManager.h
+ */
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+
+#ifndef _NDK_CAMERA_MANAGER_H
+#define _NDK_CAMERA_MANAGER_H
+
+#include "NdkCameraError.h"
+#include "NdkCameraMetadata.h"
+#include "NdkCameraDevice.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * ACameraManager is an opaque type that provides access to the camera service.
+ *
+ * A pointer can be obtained using the {@link ACameraManager_create} method.
+ */
+typedef struct ACameraManager ACameraManager;
+
+/**
+ * Create ACameraManager instance.
+ *
+ * <p>The ACameraManager is responsible for
+ * detecting, characterizing, and connecting to {@link ACameraDevice}s.</p>
+ *
+ * <p>The caller must call {@link ACameraManager_delete} to free the resources once it is done
+ * using the ACameraManager instance.</p>
+ *
+ * @return a {@link ACameraManager} instance.
+ *
+ */
+ACameraManager* ACameraManager_create();
+
+/**
+ * <p>Delete the {@link ACameraManager} instance and free its resources. </p>
+ *
+ * @param manager the {@link ACameraManager} instance to be deleted.
+ */
+void ACameraManager_delete(ACameraManager* manager);
+
+/// Struct to hold list of camera devices
+typedef struct ACameraIdList {
+    int numCameras;          ///< Number of connected camera devices
+    const char** cameraIds;  ///< list of identifiers of connected camera devices
+} ACameraIdList;
+
+/**
+ * Create a list of currently connected camera devices, including
+ * cameras that may be in use by other camera API clients.
+ *
+ * <p>Non-removable cameras use integers starting at 0 for their
+ * identifiers, while removable cameras have a unique identifier for each
+ * individual device, even if they are the same model.</p>
+ *
+ * <p>ACameraManager_getCameraIdList will allocate and return an {@link ACameraIdList}.
+ * The caller must call {@link ACameraManager_deleteCameraIdList} to free the memory.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest
+ * @param cameraIdList the output {@link ACameraIdList} will be filled in here if the method call
+ *        succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if manager or cameraIdList is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if connection to camera service fails.</li>
+ *         <li>{@link ACAMERA_ERROR_NOT_ENOUGH_MEMORY} if allocating memory fails.</li></ul>
+ */
+camera_status_t ACameraManager_getCameraIdList(ACameraManager* manager,
+                                              /*out*/ACameraIdList** cameraIdList);
+
+/**
+ * Delete a list of camera devices allocated via {@link ACameraManager_getCameraIdList}.
+ *
+ * @param cameraIdList the {@link ACameraIdList} to be deleted.
+ */
+void ACameraManager_deleteCameraIdList(ACameraIdList* cameraIdList);
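+
+/*
+ * Usage sketch (illustrative, not part of the API): enumerating the connected
+ * camera devices.
+ *
+ *   ACameraManager* manager = ACameraManager_create();
+ *   ACameraIdList* idList = NULL;
+ *   if (ACameraManager_getCameraIdList(manager, &idList) == ACAMERA_OK) {
+ *       for (int i = 0; i < idList->numCameras; i++) {
+ *           const char* id = idList->cameraIds[i];
+ *           // ... inspect or open the camera with this `id` ...
+ *       }
+ *       ACameraManager_deleteCameraIdList(idList);
+ *   }
+ *   ACameraManager_delete(manager);
+ */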
+
+/**
+ * Definition of camera availability callbacks.
+ *
+ * @param context The optional application context provided by the user in
+ *                {@link ACameraManager_AvailabilityCallbacks}.
+ * @param cameraId The ID of the camera device whose availability is changing. The memory of this
+ *                 argument is owned by the camera framework and will become invalid immediately after
+ *                 this callback returns.
+ */
+typedef void (*ACameraManager_AvailabilityCallback)(void* context, const char* cameraId);
+
+/**
+ * A listener for camera devices becoming available or unavailable to open.
+ *
+ * <p>Cameras become available when they are no longer in use, or when a new
+ * removable camera is connected. They become unavailable when some
+ * application or service starts using a camera, or when a removable camera
+ * is disconnected.</p>
+ *
+ * @see ACameraManager_registerAvailabilityCallback
+ */
+typedef struct ACameraManager_AvailabilityListener {
+    /// Optional application context.
+    void*                               context;
+    /// Called when a camera becomes available
+    ACameraManager_AvailabilityCallback onCameraAvailable;
+    /// Called when a camera becomes unavailable
+    ACameraManager_AvailabilityCallback onCameraUnavailable;
+} ACameraManager_AvailabilityCallbacks;
+
+/**
+ * Register camera availability callbacks.
+ *
+ * <p>onCameraUnavailable will be called whenever a camera device is opened by any camera API client.
+ * Other camera API clients may still be able to open such a camera device, evicting the existing
+ * client if they have a higher priority.
+ * See {@link ACameraManager_openCamera} for more details.</p>
+ *
+ * <p>The callbacks will be called on a dedicated thread shared among all ACameraManager
+ * instances.</p>
+ *
+ * <p>Since this callback will be registered with the camera service, remember to unregister it
+ * once it is no longer needed; otherwise the callback will continue to receive events
+ * indefinitely and it may prevent other resources from being released. Specifically, the
+ * callbacks will be invoked independently of the general activity lifecycle and independently
+ * of the state of individual ACameraManager instances.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param callback the {@link ACameraManager_AvailabilityCallbacks} to be registered.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if manager or callback is NULL, or
+ *                  {@link ACameraManager_AvailabilityCallbacks#onCameraAvailable} or
+ *                  {@link ACameraManager_AvailabilityCallbacks#onCameraUnavailable} is NULL.</li></ul>
+ */
+camera_status_t ACameraManager_registerAvailabilityCallback(
+        ACameraManager* manager, const ACameraManager_AvailabilityCallbacks* callback);
+
+/**
+ * Unregister camera availability callbacks.
+ *
+ * <p>Removing a callback that isn't registered has no effect.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param callback the {@link ACameraManager_AvailabilityCallbacks} to be unregistered.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if callback,
+ *                  {@link ACameraManager_AvailabilityCallbacks#onCameraAvailable}, or
+ *                  {@link ACameraManager_AvailabilityCallbacks#onCameraUnavailable} is NULL.</li></ul>
+ */
+camera_status_t ACameraManager_unregisterAvailabilityCallback(
+        ACameraManager* manager, const ACameraManager_AvailabilityCallbacks* callback);
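+
+/*
+ * Usage sketch (illustrative, not part of the API): registering availability
+ * callbacks. `onAvailable` and `onUnavailable` are hypothetical application
+ * functions matching the ACameraManager_AvailabilityCallback signature.
+ *
+ *   ACameraManager_AvailabilityCallbacks cbs = {
+ *       .context = NULL,
+ *       .onCameraAvailable = onAvailable,
+ *       .onCameraUnavailable = onUnavailable,
+ *   };
+ *   ACameraManager_registerAvailabilityCallback(manager, &cbs);
+ *   // ... later, once events are no longer needed ...
+ *   ACameraManager_unregisterAvailabilityCallback(manager, &cbs);
+ */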
+
+/**
+ * Query the capabilities of a camera device. These capabilities are
+ * immutable for a given camera.
+ *
+ * <p>See the {@link ACameraMetadata} documentation and {@link NdkCameraMetadataTags.h} for more details.</p>
+ *
+ * <p>The caller must call {@link ACameraMetadata_free} to free the memory of the output
+ * characteristics.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param cameraId the ID string of the camera device of interest.
+ * @param characteristics the output {@link ACameraMetadata} will be filled here if the method call
+ *        succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if manager, cameraId, or characteristics
+ *                  is NULL, or cameraId does not match any camera devices connected.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if connection to camera service fails.</li>
+ *         <li>{@link ACAMERA_ERROR_NOT_ENOUGH_MEMORY} if allocating memory fails.</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
+ */
+camera_status_t ACameraManager_getCameraCharacteristics(
+        ACameraManager* manager, const char* cameraId,
+        /*out*/ACameraMetadata** characteristics);
+
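+/*
+ * A short sketch of reading one immutable property, the sensor orientation
+ * (tag ACAMERA_SENSOR_ORIENTATION from NdkCameraMetadataTags.h); a hedged
+ * example, with error handling abbreviated:
+ *
+ *     #include <camera/NdkCameraManager.h>
+ *     #include <camera/NdkCameraMetadata.h>
+ *
+ *     int getSensorOrientation(ACameraManager* manager, const char* cameraId) {
+ *         ACameraMetadata* chars = NULL;
+ *         if (ACameraManager_getCameraCharacteristics(manager, cameraId,
+ *                 &chars) != ACAMERA_OK) {
+ *             return -1;
+ *         }
+ *         int orientation = -1;
+ *         ACameraMetadata_const_entry entry;
+ *         if (ACameraMetadata_getConstEntry(chars, ACAMERA_SENSOR_ORIENTATION,
+ *                 &entry) == ACAMERA_OK) {
+ *             orientation = entry.data.i32[0];  // clockwise degrees: 0/90/180/270
+ *         }
+ *         ACameraMetadata_free(chars);  // caller owns the characteristics
+ *         return orientation;
+ *     }
+ */
+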
+/**
+ * Open a connection to a camera with the given ID. The opened camera device will be
+ * returned in the `device` parameter.
+ *
+ * <p>Use {@link ACameraManager_getCameraIdList} to get the list of available camera
+ * devices. Note that even if an ID is listed, open may fail if the device
+ * is disconnected between the calls to {@link ACameraManager_getCameraIdList} and
+ * {@link ACameraManager_openCamera}, or if a higher-priority camera API client begins using the
+ * camera device.</p>
+ *
+ * <p>Devices for which the
+ * {@link ACameraManager_AvailabilityCallbacks#onCameraUnavailable} callback has been called due to
+ * the device being in use by a lower-priority, background camera API client can still potentially
+ * be opened by calling this method when the calling camera API client has a higher priority
+ * than the current camera API client using this device.  In general, if the top, foreground
+ * activity is running within your application process, your process will be given the highest
+ * priority when accessing the camera, and this method will succeed even if the camera device is
+ * in use by another camera API client. Any lower-priority application that loses control of the
+ * camera in this way will receive an
+ * {@link ACameraDevice_StateCallbacks#onDisconnected} callback.</p>
+ *
+ * <p>Once the camera is successfully opened, the ACameraDevice can then be set up
+ * for operation by calling {@link ACameraDevice_createCaptureSession} and
+ * {@link ACameraDevice_createCaptureRequest}.</p>
+ *
+ * <p>If the camera becomes disconnected after this function call returns,
+ * {@link ACameraDevice_StateCallbacks#onDisconnected} will be called with an
+ * ACameraDevice in the disconnected state.</p>
+ *
+ * <p>If the camera runs into an error after this function call returns,
+ * {@link ACameraDevice_StateCallbacks#onError} will be called with an
+ * ACameraDevice in the error state.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param cameraId the ID string of the camera device to be opened.
+ * @param callback the {@link ACameraDevice_StateCallbacks} associated with the opened camera device.
+ * @param device the opened {@link ACameraDevice} will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if manager, cameraId, callback, or device
+ *                  is NULL, or cameraId does not match any camera devices connected.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if connection to camera service fails.</li>
+ *         <li>{@link ACAMERA_ERROR_NOT_ENOUGH_MEMORY} if allocating memory fails.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_IN_USE} if camera device is being used by a higher
+ *                   priority camera API client.</li>
+ *         <li>{@link ACAMERA_ERROR_MAX_CAMERA_IN_USE} if the system-wide limit for number of open
+ *                   cameras or camera resources has been reached, and more camera devices cannot be
+ *                   opened until previous instances are closed.</li>
+ *         <li>{@link ACAMERA_ERROR_CAMERA_DISABLED} if the camera is disabled due to a device
+ *                   policy, and cannot be opened.</li>
+ *         <li>{@link ACAMERA_ERROR_PERMISSION_DENIED} if the application does not have permission
+ *                   to open camera.</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
+ */
+camera_status_t ACameraManager_openCamera(
+        ACameraManager* manager, const char* cameraId,
+        ACameraDevice_StateCallbacks* callback,
+        /*out*/ACameraDevice** device);
+
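+/*
+ * A minimal open/close sketch (hedged: it assumes the ACameraDevice_StateCallbacks
+ * struct and ACameraDevice_close from NdkCameraDevice.h; real code would create a
+ * capture session between opening and closing):
+ *
+ *     #include <camera/NdkCameraDevice.h>
+ *     #include <camera/NdkCameraManager.h>
+ *
+ *     static void onDisconnected(void* context, ACameraDevice* device) {
+ *         // Device is no longer usable; release any per-device state here.
+ *     }
+ *     static void onError(void* context, ACameraDevice* device, int error) {
+ *         // Fatal device error; treat it like a disconnect.
+ *     }
+ *
+ *     camera_status_t openAndClose(ACameraManager* manager, const char* id) {
+ *         ACameraDevice_StateCallbacks cb = {
+ *             .context = NULL,
+ *             .onDisconnected = onDisconnected,
+ *             .onError = onError,
+ *         };
+ *         ACameraDevice* device = NULL;
+ *         camera_status_t status = ACameraManager_openCamera(manager, id, &cb, &device);
+ *         if (status != ACAMERA_OK) {
+ *             return status;  // e.g. ACAMERA_ERROR_CAMERA_IN_USE
+ *         }
+ *         // ... ACameraDevice_createCaptureSession / ..._createCaptureRequest ...
+ *         return ACameraDevice_close(device);
+ *     }
+ */
+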
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif //_NDK_CAMERA_MANAGER_H
+
+/** @} */
diff --git a/include/camera/ndk/NdkCameraMetadata.h b/include/camera/ndk/NdkCameraMetadata.h
new file mode 100644
index 0000000..d929854
--- /dev/null
+++ b/include/camera/ndk/NdkCameraMetadata.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraMetadata.h
+ */
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+
+#ifndef _NDK_CAMERA_METADATA_H
+#define _NDK_CAMERA_METADATA_H
+
+#include "NdkCameraError.h"
+#include "NdkCameraMetadataTags.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * ACameraMetadata is an opaque type that provides access to read-only camera metadata like camera
+ * characteristics (via {@link ACameraManager_getCameraCharacteristics}) or capture results (via
+ * {@link ACameraCaptureSession_captureCallback_result}).
+ */
+typedef struct ACameraMetadata ACameraMetadata;
+
+/**
+ * Possible data types of a metadata entry.
+ *
+ * Keep in sync with system/media/include/system/camera_metadata.h
+ */
+enum {
+    /// Unsigned 8-bit integer (uint8_t)
+    ACAMERA_TYPE_BYTE = 0,
+    /// Signed 32-bit integer (int32_t)
+    ACAMERA_TYPE_INT32 = 1,
+    /// 32-bit float (float)
+    ACAMERA_TYPE_FLOAT = 2,
+    /// Signed 64-bit integer (int64_t)
+    ACAMERA_TYPE_INT64 = 3,
+    /// 64-bit float (double)
+    ACAMERA_TYPE_DOUBLE = 4,
+    /// A 64-bit fraction (ACameraMetadata_rational)
+    ACAMERA_TYPE_RATIONAL = 5,
+    /// Number of type fields
+    ACAMERA_NUM_TYPES
+};
+
+/**
+ * Definition of rational data type in {@link ACameraMetadata}.
+ */
+typedef struct ACameraMetadata_rational {
+    int32_t numerator;
+    int32_t denominator;
+} ACameraMetadata_rational;
+
+/**
+ * A single camera metadata entry.
+ *
+ * <p>Each entry is an array of values, though many metadata fields may have only one element in
+ * the array.</p>
+ */
+typedef struct ACameraMetadata_entry {
+    /**
+     * The tag identifying the entry.
+     *
+     * <p> It is one of the values defined in {@link NdkCameraMetadataTags.h}, and defines how the
+     * entry should be interpreted and which parts of the API provide it.
+     * See {@link NdkCameraMetadataTags.h} for more details. </p>
+     */
+    uint32_t tag;
+
+    /**
+     * The data type of this metadata entry.
+     *
+     * <p>Must be one of ACAMERA_TYPE_* enum values defined above. A particular tag always has the
+     * same type.</p>
+     */
+    uint8_t  type;
+
+    /**
+     * Count of elements (NOT count of bytes) in this metadata entry.
+     */
+    uint32_t count;
+
+    /**
+     * Pointer to the data held in this metadata entry.
+     *
+     * <p>The type field above defines which union member pointer is valid. The count field above
+     * defines the length of the data in number of elements.</p>
+     */
+    union {
+        uint8_t *u8;
+        int32_t *i32;
+        float   *f;
+        int64_t *i64;
+        double  *d;
+        ACameraMetadata_rational* r;
+    } data;
+} ACameraMetadata_entry;
+
+/**
+ * A single read-only camera metadata entry.
+ *
+ * <p>Each entry is an array of values, though many metadata fields may have only one element in
+ * the array.</p>
+ */
+typedef struct ACameraMetadata_const_entry {
+    /**
+     * The tag identifying the entry.
+     *
+     * <p> It is one of the values defined in {@link NdkCameraMetadataTags.h}, and defines how the
+     * entry should be interpreted and which parts of the API provide it.
+     * See {@link NdkCameraMetadataTags.h} for more details. </p>
+     */
+    uint32_t tag;
+
+    /**
+     * The data type of this metadata entry.
+     *
+     * <p>Must be one of ACAMERA_TYPE_* enum values defined above. A particular tag always has the
+     * same type.</p>
+     */
+    uint8_t  type;
+
+    /**
+     * Count of elements (NOT count of bytes) in this metadata entry.
+     */
+    uint32_t count;
+
+    /**
+     * Pointer to the data held in this metadata entry.
+     *
+     * <p>The type field above defines which union member pointer is valid. The count field above
+     * defines the length of the data in number of elements.</p>
+     */
+    union {
+        const uint8_t *u8;
+        const int32_t *i32;
+        const float   *f;
+        const int64_t *i64;
+        const double  *d;
+        const ACameraMetadata_rational* r;
+    } data;
+} ACameraMetadata_const_entry;
+
+/**
+ * Get a metadata entry from an input {@link ACameraMetadata}.
+ *
+ * <p>The memory of the data field in the returned entry is managed by camera framework. Do not
+ * attempt to free it.</p>
+ *
+ * @param metadata the {@link ACameraMetadata} of interest.
+ * @param tag the tag value of the camera metadata entry to get.
+ * @param entry the output {@link ACameraMetadata_const_entry} will be filled here if the method
+ *        call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if metadata or entry is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_METADATA_NOT_FOUND} if input metadata does not contain an entry
+ *             of input tag value.</li></ul>
+ */
+camera_status_t ACameraMetadata_getConstEntry(
+        const ACameraMetadata* metadata, uint32_t tag, /*out*/ACameraMetadata_const_entry* entry);
+
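+/*
+ * A hedged sketch of dispatching on the type and count fields of a returned
+ * entry; which union member is valid is determined entirely by entry.type:
+ *
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *     #include <camera/NdkCameraMetadata.h>
+ *
+ *     void printEntry(const ACameraMetadata* metadata, uint32_t tag) {
+ *         ACameraMetadata_const_entry entry;
+ *         if (ACameraMetadata_getConstEntry(metadata, tag, &entry) != ACAMERA_OK) {
+ *             return;  // tag not present in this metadata buffer
+ *         }
+ *         for (uint32_t i = 0; i < entry.count; i++) {
+ *             switch (entry.type) {
+ *                 case ACAMERA_TYPE_BYTE:   printf("%u\n", (unsigned) entry.data.u8[i]); break;
+ *                 case ACAMERA_TYPE_INT32:  printf("%d\n", entry.data.i32[i]);           break;
+ *                 case ACAMERA_TYPE_FLOAT:  printf("%g\n", entry.data.f[i]);             break;
+ *                 case ACAMERA_TYPE_INT64:  printf("%lld\n", (long long) entry.data.i64[i]); break;
+ *                 case ACAMERA_TYPE_DOUBLE: printf("%g\n", entry.data.d[i]);             break;
+ *                 case ACAMERA_TYPE_RATIONAL:
+ *                     printf("%d/%d\n", entry.data.r[i].numerator,
+ *                            entry.data.r[i].denominator);
+ *                     break;
+ *             }
+ *         }
+ *     }
+ */
+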
+/**
+ * List all the entry tags in input {@link ACameraMetadata}.
+ *
+ * @param metadata the {@link ACameraMetadata} of interest.
+ * @param numEntries the number of metadata entries in the input {@link ACameraMetadata}.
+ * @param tags the tag values of the metadata entries. The length of tags is returned in the
+ *             numEntries argument. The memory is managed by ACameraMetadata itself and must NOT be
+ *             freed or deleted by the application. Do NOT access tags after calling
+ *             ACameraMetadata_free.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if metadata, numEntries or tags is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
+ */
+camera_status_t ACameraMetadata_getAllTags(
+        const ACameraMetadata* metadata, /*out*/int32_t* numEntries, /*out*/const uint32_t** tags);
+
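+/*
+ * A brief sketch of enumerating every tag in a metadata buffer (reusing the
+ * hypothetical printEntry helper sketched above); the tags array stays owned
+ * by the metadata object:
+ *
+ *     void printAllEntries(const ACameraMetadata* metadata) {
+ *         int32_t numEntries = 0;
+ *         const uint32_t* tags = NULL;
+ *         if (ACameraMetadata_getAllTags(metadata, &numEntries, &tags) != ACAMERA_OK) {
+ *             return;
+ *         }
+ *         for (int32_t i = 0; i < numEntries; i++) {
+ *             printEntry(metadata, tags[i]);
+ *         }
+ *         // Do not free `tags`, and do not touch it after ACameraMetadata_free(metadata).
+ *     }
+ */
+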
+/**
+ * Create a copy of input {@link ACameraMetadata}.
+ *
+ * <p>The returned ACameraMetadata must be freed by the application with
+ * {@link ACameraMetadata_free} after the application is done using it.</p>
+ *
+ * @param src the input {@link ACameraMetadata} to be copied.
+ *
+ * @return a valid ACameraMetadata pointer or NULL if the input metadata cannot be copied.
+ */
+ACameraMetadata* ACameraMetadata_copy(const ACameraMetadata* src);
+
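+/*
+ * One common use of the copy is keeping characteristics alive independently of
+ * the buffer they came from; a brief hedged sketch:
+ *
+ *     ACameraMetadata* kept = ACameraMetadata_copy(chars);  // independent copy
+ *     ACameraMetadata_free(chars);  // the original can be released right away
+ *     // ... use `kept` for as long as needed ...
+ *     ACameraMetadata_free(kept);   // the copy must be freed separately
+ */
+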
+/**
+ * Free a {@link ACameraMetadata} structure.
+ *
+ * @param metadata the {@link ACameraMetadata} to be freed.
+ */
+void ACameraMetadata_free(ACameraMetadata* metadata);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif //_NDK_CAMERA_METADATA_H
+
+/** @} */
diff --git a/include/camera/ndk/NdkCameraMetadataTags.h b/include/camera/ndk/NdkCameraMetadataTags.h
new file mode 100644
index 0000000..e7f6989
--- /dev/null
+++ b/include/camera/ndk/NdkCameraMetadataTags.h
@@ -0,0 +1,6908 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraMetadataTags.h
+ */
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+
+#ifndef _NDK_CAMERA_METADATA_TAGS_H
+#define _NDK_CAMERA_METADATA_TAGS_H
+
+typedef enum acamera_metadata_section {
+    ACAMERA_COLOR_CORRECTION,
+    ACAMERA_CONTROL,
+    ACAMERA_DEMOSAIC,
+    ACAMERA_EDGE,
+    ACAMERA_FLASH,
+    ACAMERA_FLASH_INFO,
+    ACAMERA_HOT_PIXEL,
+    ACAMERA_JPEG,
+    ACAMERA_LENS,
+    ACAMERA_LENS_INFO,
+    ACAMERA_NOISE_REDUCTION,
+    ACAMERA_QUIRKS,
+    ACAMERA_REQUEST,
+    ACAMERA_SCALER,
+    ACAMERA_SENSOR,
+    ACAMERA_SENSOR_INFO,
+    ACAMERA_SHADING,
+    ACAMERA_STATISTICS,
+    ACAMERA_STATISTICS_INFO,
+    ACAMERA_TONEMAP,
+    ACAMERA_LED,
+    ACAMERA_INFO,
+    ACAMERA_BLACK_LEVEL,
+    ACAMERA_SYNC,
+    ACAMERA_REPROCESS,
+    ACAMERA_DEPTH,
+    ACAMERA_SECTION_COUNT,
+
+    ACAMERA_VENDOR = 0x8000
+} acamera_metadata_section_t;
+
+/**
+ * Hierarchy positions in enum space.
+ */
+typedef enum acamera_metadata_section_start {
+    ACAMERA_COLOR_CORRECTION_START = ACAMERA_COLOR_CORRECTION  << 16,
+    ACAMERA_CONTROL_START          = ACAMERA_CONTROL           << 16,
+    ACAMERA_DEMOSAIC_START         = ACAMERA_DEMOSAIC          << 16,
+    ACAMERA_EDGE_START             = ACAMERA_EDGE              << 16,
+    ACAMERA_FLASH_START            = ACAMERA_FLASH             << 16,
+    ACAMERA_FLASH_INFO_START       = ACAMERA_FLASH_INFO        << 16,
+    ACAMERA_HOT_PIXEL_START        = ACAMERA_HOT_PIXEL         << 16,
+    ACAMERA_JPEG_START             = ACAMERA_JPEG              << 16,
+    ACAMERA_LENS_START             = ACAMERA_LENS              << 16,
+    ACAMERA_LENS_INFO_START        = ACAMERA_LENS_INFO         << 16,
+    ACAMERA_NOISE_REDUCTION_START  = ACAMERA_NOISE_REDUCTION   << 16,
+    ACAMERA_QUIRKS_START           = ACAMERA_QUIRKS            << 16,
+    ACAMERA_REQUEST_START          = ACAMERA_REQUEST           << 16,
+    ACAMERA_SCALER_START           = ACAMERA_SCALER            << 16,
+    ACAMERA_SENSOR_START           = ACAMERA_SENSOR            << 16,
+    ACAMERA_SENSOR_INFO_START      = ACAMERA_SENSOR_INFO       << 16,
+    ACAMERA_SHADING_START          = ACAMERA_SHADING           << 16,
+    ACAMERA_STATISTICS_START       = ACAMERA_STATISTICS        << 16,
+    ACAMERA_STATISTICS_INFO_START  = ACAMERA_STATISTICS_INFO   << 16,
+    ACAMERA_TONEMAP_START          = ACAMERA_TONEMAP           << 16,
+    ACAMERA_LED_START              = ACAMERA_LED               << 16,
+    ACAMERA_INFO_START             = ACAMERA_INFO              << 16,
+    ACAMERA_BLACK_LEVEL_START      = ACAMERA_BLACK_LEVEL       << 16,
+    ACAMERA_SYNC_START             = ACAMERA_SYNC              << 16,
+    ACAMERA_REPROCESS_START        = ACAMERA_REPROCESS         << 16,
+    ACAMERA_DEPTH_START            = ACAMERA_DEPTH             << 16,
+    ACAMERA_VENDOR_START           = ACAMERA_VENDOR            << 16
+} acamera_metadata_section_start_t;
+
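+/*
+ * Given this layout, the section of any tag is recoverable by shifting off the
+ * low 16 bits; a small sketch (helper names are illustrative only):
+ *
+ *     static acamera_metadata_section_t tagSection(uint32_t tag) {
+ *         return (acamera_metadata_section_t) (tag >> 16);
+ *     }
+ *
+ *     static int isVendorTag(uint32_t tag) {
+ *         return (tag >> 16) >= ACAMERA_VENDOR;
+ *     }
+ *
+ * For example, ACAMERA_CONTROL_AE_MODE below is ACAMERA_CONTROL_START + 3, so
+ * tagSection(ACAMERA_CONTROL_AE_MODE) == ACAMERA_CONTROL.
+ */
+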
+/**
+ * Main enum for camera metadata tags.
+ */
+typedef enum acamera_metadata_tag {
+    /**
+     * <p>The mode control selects how the image data is converted from the
+     * sensor's native color into linear sRGB color.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>When auto-white balance (AWB) is enabled with ACAMERA_CONTROL_AWB_MODE, this
+     * control is overridden by the AWB routine. When AWB is disabled, the
+     * application controls how the color mapping is performed.</p>
+     * <p>We define the expected processing pipeline below. For consistency
+     * across devices, this is always the case with TRANSFORM_MATRIX.</p>
+     * <p>When either FULL or HIGH_QUALITY is used, the camera device may
+     * do additional processing but ACAMERA_COLOR_CORRECTION_GAINS and
+     * ACAMERA_COLOR_CORRECTION_TRANSFORM will still be provided by the
+     * camera device (in the results) and be roughly correct.</p>
+     * <p>Switching to TRANSFORM_MATRIX and using the data provided from
+     * FAST or HIGH_QUALITY will yield a picture with the same white point
+     * as what was produced by the camera device in the earlier frame.</p>
+     * <p>The expected processing pipeline is as follows:</p>
+     * <p><img alt="White balance processing pipeline" src="../images/camera2/metadata/android.colorCorrection.mode/processing_pipeline.png" /></p>
+     * <p>The white balance is encoded by two values, a 4-channel white-balance
+     * gain vector (applied in the Bayer domain), and a 3x3 color transform
+     * matrix (applied after demosaic).</p>
+     * <p>The 4-channel white-balance gains are defined as:</p>
+     * <pre><code>ACAMERA_COLOR_CORRECTION_GAINS = [ R G_even G_odd B ]
+     * </code></pre>
+     * <p>where <code>G_even</code> is the gain for green pixels on even rows of the
+     * output, and <code>G_odd</code> is the gain for green pixels on the odd rows.
+     * These may be identical for a given camera device implementation; if
+     * the camera device does not support a separate gain for even/odd green
+     * channels, it will use the <code>G_even</code> value, and write <code>G_odd</code> equal to
+     * <code>G_even</code> in the output result metadata.</p>
+     * <p>The matrices for color transforms are defined as a 9-entry vector:</p>
+     * <pre><code>ACAMERA_COLOR_CORRECTION_TRANSFORM = [ I0 I1 I2 I3 I4 I5 I6 I7 I8 ]
+     * </code></pre>
+     * <p>which define a transform from input sensor colors, <code>P_in = [ r g b ]</code>,
+     * to output linear sRGB, <code>P_out = [ r' g' b' ]</code>,</p>
+     * <p>with colors as follows:</p>
+     * <pre><code>r' = I0r + I1g + I2b
+     * g' = I3r + I4g + I5b
+     * b' = I6r + I7g + I8b
+     * </code></pre>
+     * <p>Both the input and output value ranges must match. Overflow/underflow
+     * values are clipped to fit within the range.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * @see ACAMERA_CONTROL_AWB_MODE
+     */
+    ACAMERA_COLOR_CORRECTION_MODE =                             // byte (enum)
+            ACAMERA_COLOR_CORRECTION_START,
+    /**
+     * <p>A color transform matrix to use to transform
+     * from sensor RGB color space to output linear sRGB color space.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This matrix is either set by the camera device when the request
+     * ACAMERA_COLOR_CORRECTION_MODE is not TRANSFORM_MATRIX, or
+     * directly by the application in the request when the
+     * ACAMERA_COLOR_CORRECTION_MODE is TRANSFORM_MATRIX.</p>
+     * <p>In the latter case, the camera device may round the matrix to account
+     * for precision issues; the final rounded matrix should be reported back
+     * in this matrix result metadata. The transform should keep the magnitude
+     * of the output color values within <code>[0, 1.0]</code> (assuming input color
+     * values are within the normalized range <code>[0, 1.0]</code>), or clipping may occur.</p>
+     * <p>The valid range of each matrix element varies on different devices, but
+     * values within [-1.5, 3.0] are guaranteed not to be clipped.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_MODE
+     */
+    ACAMERA_COLOR_CORRECTION_TRANSFORM =                        // rational[3*3]
+            ACAMERA_COLOR_CORRECTION_START + 1,
+    /**
+     * <p>Gains applying to Bayer raw color channels for
+     * white-balance.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>These per-channel gains are either set by the camera device
+     * when the request ACAMERA_COLOR_CORRECTION_MODE is not
+     * TRANSFORM_MATRIX, or directly by the application in the
+     * request when the ACAMERA_COLOR_CORRECTION_MODE is
+     * TRANSFORM_MATRIX.</p>
+     * <p>The gains in the result metadata are the gains actually
+     * applied by the camera device to the current frame.</p>
+     * <p>The valid range of gains varies on different devices, but gains
+     * between [1.0, 3.0] are guaranteed not to be clipped. Even if a given
+     * device allows gains below 1.0, this is usually not recommended because
+     * this can create color artifacts.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_MODE
+     */
+    ACAMERA_COLOR_CORRECTION_GAINS =                            // float[4]
+            ACAMERA_COLOR_CORRECTION_START + 2,
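+    /*
+     * A hedged sketch of supplying manual gains in a request (it assumes
+     * ACaptureRequest_setEntry_float from NdkCaptureRequest.h, and that
+     * ACAMERA_COLOR_CORRECTION_MODE was set to TRANSFORM_MATRIX):
+     *
+     *     // [ R, G_even, G_odd, B ] gains applied in the Bayer domain
+     *     const float gains[4] = { 2.0f, 1.0f, 1.0f, 1.8f };
+     *     ACaptureRequest_setEntry_float(request, ACAMERA_COLOR_CORRECTION_GAINS,
+     *                                    4, gains);
+     */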
+    /**
+     * <p>Mode of operation for the chromatic aberration correction algorithm.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Chromatic (color) aberration is caused by the fact that different wavelengths of light
+     * cannot focus on the same point after exiting from the lens. This metadata defines
+     * the high-level control of the chromatic aberration correction algorithm, which aims to
+     * minimize the chromatic artifacts that may occur along the object boundaries in an
+     * image.</p>
+     * <p>FAST/HIGH_QUALITY both mean that camera-device-determined aberration
+     * correction will be applied. HIGH_QUALITY mode indicates that the camera device will
+     * use the highest-quality aberration correction algorithms, even if it slows down
+     * capture rate. FAST means the camera device will not slow down capture rate when
+     * applying aberration correction.</p>
+     * <p>LEGACY devices will always be in FAST mode.</p>
+     */
+    ACAMERA_COLOR_CORRECTION_ABERRATION_MODE =                  // byte (enum)
+            ACAMERA_COLOR_CORRECTION_START + 3,
+    /**
+     * <p>List of aberration correction modes for ACAMERA_COLOR_CORRECTION_ABERRATION_MODE that are
+     * supported by this camera device.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_ABERRATION_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This key lists the valid modes for ACAMERA_COLOR_CORRECTION_ABERRATION_MODE.  If no
+     * aberration correction modes are available for a device, this list will solely include
+     * OFF mode. All camera devices will support either OFF or FAST mode.</p>
+     * <p>Camera devices that support the MANUAL_POST_PROCESSING capability will always list
+     * OFF mode. This includes all FULL level devices.</p>
+     * <p>LEGACY devices will always only support FAST mode.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_ABERRATION_MODE
+     */
+    ACAMERA_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES =       // byte[n]
+            ACAMERA_COLOR_CORRECTION_START + 4,
+    ACAMERA_COLOR_CORRECTION_END,
+
+    /**
+     * <p>The desired setting for the camera device's auto-exposure
+     * algorithm's antibanding compensation.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Some kinds of lighting fixtures, such as some fluorescent
+     * lights, flicker at the rate of the power supply frequency
+     * (60Hz or 50Hz, depending on country). While this is
+     * typically not noticeable to a person, it can be visible to
+     * a camera device. If a camera sets its exposure time to the
+     * wrong value, the flicker may become visible in the
+     * viewfinder as flicker or in a final captured image, as a
+     * set of variable-brightness bands across the image.</p>
+     * <p>Therefore, the auto-exposure routines of camera devices
+     * include antibanding routines that ensure that the chosen
+     * exposure value will not cause such banding. The choice of
+     * exposure time depends on the rate of flicker, which the
+     * camera device can detect automatically, or the expected
+     * rate can be selected by the application using this
+     * control.</p>
+     * <p>A given camera device may not support all of the possible
+     * options for the antibanding mode. The
+     * ACAMERA_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES key contains
+     * the available modes for a given camera device.</p>
+     * <p>AUTO mode is the default if it is available on a given
+     * camera device. When AUTO mode is not available, the
+     * default will be either 50HZ or 60HZ, and both 50HZ
+     * and 60HZ will be available.</p>
+     * <p>If manual exposure control is enabled (by setting
+     * ACAMERA_CONTROL_AE_MODE or ACAMERA_CONTROL_MODE to OFF),
+     * then this setting has no effect, and the application must
+     * ensure it selects exposure times that do not cause banding
+     * issues. The ACAMERA_STATISTICS_SCENE_FLICKER key can assist
+     * the application in this.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_STATISTICS_SCENE_FLICKER
+     */
+    ACAMERA_CONTROL_AE_ANTIBANDING_MODE =                       // byte (enum)
+            ACAMERA_CONTROL_START,
+    /**
+     * <p>Adjustment to auto-exposure (AE) target image
+     * brightness.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>The adjustment is measured as a count of steps, with the
+     * step size defined by ACAMERA_CONTROL_AE_COMPENSATION_STEP and the
+     * allowed range by ACAMERA_CONTROL_AE_COMPENSATION_RANGE.</p>
+     * <p>For example, if the exposure value (EV) step is 0.333, '6'
+     * will mean an exposure compensation of +2 EV; -3 will mean an
+     * exposure compensation of -1 EV. One EV represents a doubling
+     * of image brightness. Note that this control will only be
+     * effective if ACAMERA_CONTROL_AE_MODE <code>!=</code> OFF. This control
+     * will take effect even when ACAMERA_CONTROL_AE_LOCK <code>== true</code>.</p>
+     * <p>When the exposure compensation value is changed, the camera device
+     * may take several frames to reach the newly requested exposure target.
+     * During that time, ACAMERA_CONTROL_AE_STATE field will be in the SEARCHING
+     * state. Once the new exposure target is reached, ACAMERA_CONTROL_AE_STATE will
+     * change from SEARCHING to either CONVERGED, LOCKED (if AE lock is enabled), or
+     * FLASH_REQUIRED (if the scene is too dark for still capture).</p>
+     *
+     * @see ACAMERA_CONTROL_AE_COMPENSATION_RANGE
+     * @see ACAMERA_CONTROL_AE_COMPENSATION_STEP
+     * @see ACAMERA_CONTROL_AE_LOCK
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AE_STATE
+     */
+    ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION =                  // int32
+            ACAMERA_CONTROL_START + 1,
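+    /*
+     * A worked example of the step arithmetic above (hedged: it assumes
+     * ACaptureRequest_setEntry_i32 from NdkCaptureRequest.h and characteristics
+     * `chars` from ACameraManager_getCameraCharacteristics). With a 1/3 EV step,
+     * +2 EV is requested as 6 steps:
+     *
+     *     ACameraMetadata_const_entry step;
+     *     ACameraMetadata_getConstEntry(chars,
+     *             ACAMERA_CONTROL_AE_COMPENSATION_STEP, &step);
+     *     // steps = EV / (numerator/denominator); for 1/3 EV: 2 / (1/3) = 6.
+     *     // Real code should also clamp to ACAMERA_CONTROL_AE_COMPENSATION_RANGE.
+     *     int32_t steps = (int32_t) (2.0 * step.data.r[0].denominator
+     *                                    / step.data.r[0].numerator);
+     *     ACaptureRequest_setEntry_i32(request,
+     *             ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION, 1, &steps);
+     */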
+    /**
+     * <p>Whether auto-exposure (AE) is currently locked to its latest
+     * calculated values.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>When set to <code>true</code> (ON), the AE algorithm is locked to its latest parameters,
+     * and will not change exposure settings until the lock is set to <code>false</code> (OFF).</p>
+     * <p>Note that even when AE is locked, the flash may be fired if
+     * the ACAMERA_CONTROL_AE_MODE is ON_AUTO_FLASH /
+     * ON_ALWAYS_FLASH / ON_AUTO_FLASH_REDEYE.</p>
+     * <p>When ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION is changed, even if the AE lock
+     * is ON, the camera device will still adjust its exposure value.</p>
+     * <p>If AE precapture is triggered (see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER)
+     * when AE is already locked, the camera device will not change the exposure time
+     * (ACAMERA_SENSOR_EXPOSURE_TIME) and sensitivity (ACAMERA_SENSOR_SENSITIVITY)
+     * parameters. The flash may be fired if the ACAMERA_CONTROL_AE_MODE
+     * is ON_AUTO_FLASH/ON_AUTO_FLASH_REDEYE and the scene is too dark. If the
+     * ACAMERA_CONTROL_AE_MODE is ON_ALWAYS_FLASH, the scene may become overexposed.
+     * Similarly, AE precapture trigger CANCEL has no effect when AE is already locked.</p>
+     * <p>When an AE precapture sequence is triggered, AE unlock will not be able to unlock
+     * the AE if AE is locked by the camera device internally during precapture metering
+     * sequence. In other words, submitting requests with AE unlock has no effect for an
+     * ongoing precapture metering sequence. Otherwise, the precapture metering sequence
+     * will never succeed in a sequence of preview requests where AE lock is always set
+     * to <code>false</code>.</p>
+     * <p>Since the camera device has a pipeline of in-flight requests, the settings that
+     * get locked do not necessarily correspond to the settings that were present in the
+     * latest capture result received from the camera device, since additional captures
+     * and AE updates may have occurred even before the result was sent out. If an
+     * application is switching between automatic and manual control and wishes to eliminate
+     * any flicker during the switch, the following procedure is recommended:</p>
+     * <ol>
+     * <li>Starting in auto-AE mode:</li>
+     * <li>Lock AE</li>
+     * <li>Wait for the first result to be output that has the AE locked</li>
+     * <li>Copy exposure settings from that result into a request, set the request to manual AE</li>
+     * <li>Submit the capture request, proceed to run manual AE as desired.</li>
+     * </ol>
+     * <p>See ACAMERA_CONTROL_AE_STATE for AE lock related state transition details.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * @see ACAMERA_CONTROL_AE_STATE
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_CONTROL_AE_LOCK =                                   // byte (enum)
+            ACAMERA_CONTROL_START + 2,
+    /**
+     * <p>The desired mode for the camera device's
+     * auto-exposure routine.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This control is only effective if ACAMERA_CONTROL_MODE is
+     * AUTO.</p>
+     * <p>When set to any of the ON modes, the camera device's
+     * auto-exposure routine is enabled, overriding the
+     * application's selected exposure time, sensor sensitivity,
+     * and frame duration (ACAMERA_SENSOR_EXPOSURE_TIME,
+     * ACAMERA_SENSOR_SENSITIVITY, and
+     * ACAMERA_SENSOR_FRAME_DURATION). If one of the FLASH modes
+     * is selected, the camera device's flash unit controls are
+     * also overridden.</p>
+     * <p>The FLASH modes are only available if the camera device
+     * has a flash unit (ACAMERA_FLASH_INFO_AVAILABLE is <code>true</code>).</p>
+     * <p>If flash TORCH mode is desired, this field must be set to
+     * ON or OFF, and ACAMERA_FLASH_MODE set to TORCH.</p>
+     * <p>When set to any of the ON modes, the values chosen by the
+     * camera device auto-exposure routine for the overridden
+     * fields for a given capture will be available in its
+     * CaptureResult.</p>
+     *
+     * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_FLASH_INFO_AVAILABLE
+     * @see ACAMERA_FLASH_MODE
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_CONTROL_AE_MODE =                                   // byte (enum)
+            ACAMERA_CONTROL_START + 3,
+    /**
+     * <p>List of metering areas to use for auto-exposure adjustment.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Not available if android.control.maxRegionsAe is 0.
+     * Otherwise will always be present.</p>
+     * <p>The maximum number of regions supported by the device is determined by the value
+     * of android.control.maxRegionsAe.</p>
+     * <p>The data representation is int[5 * area_count].
+     * Every five elements represent a metering region of (xmin, ymin, xmax, ymax, weight).
+     * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
+     * ymax.</p>
+     * <p>The coordinate system is based on the active pixel array,
+     * with (0,0) being the top-left pixel in the active pixel array, and
+     * (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the
+     * bottom-right pixel in the active pixel array.</p>
+     * <p>The weight must be within <code>[0, 1000]</code>, and represents a weight
+     * for every pixel in the area. This means that a large metering area
+     * with the same weight as a smaller area will have more effect in
+     * the metering result. Metering areas can partially overlap and the
+     * camera device will add the weights in the overlap region.</p>
+     * <p>The weights are relative to weights of other exposure metering regions, so if only one
+     * region is used, all non-zero weights will have the same effect. A region with 0
+     * weight is ignored.</p>
+     * <p>If all regions have 0 weight, then no specific metering area needs to be used by the
+     * camera device.</p>
+     * <p>If the metering region is outside the used ACAMERA_SCALER_CROP_REGION returned in
+     * capture result metadata, the camera device will ignore the sections outside the crop
+     * region and output only the intersection rectangle as the metering region in the result
+     * metadata.  If the region is entirely outside the crop region, it will be ignored and
+     * not reported in the result metadata.</p>
+     *
+     * @see ACAMERA_SCALER_CROP_REGION
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_CONTROL_AE_REGIONS =                                // int32[5*area_count]
+            ACAMERA_CONTROL_START + 4,
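+    /*
+     * A hedged sketch of the five-element representation described above: one
+     * centered region with full weight for a hypothetical 3840x2160 active
+     * array (ACaptureRequest_setEntry_i32 assumed from NdkCaptureRequest.h):
+     *
+     *     // (xmin, ymin, xmax, ymax, weight); xmax/ymax are exclusive
+     *     const int32_t region[5] = { 960, 540, 2880, 1620, 1000 };
+     *     ACaptureRequest_setEntry_i32(request, ACAMERA_CONTROL_AE_REGIONS,
+     *                                  5, region);
+     */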
+    /**
+     * <p>Range over which the auto-exposure routine can
+     * adjust the capture frame rate to maintain good
+     * exposure.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Only constrains auto-exposure (AE) algorithm, not
+     * manual control of ACAMERA_SENSOR_EXPOSURE_TIME and
+     * ACAMERA_SENSOR_FRAME_DURATION.</p>
+     *
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     */
+    ACAMERA_CONTROL_AE_TARGET_FPS_RANGE =                       // int32[2]
+            ACAMERA_CONTROL_START + 5,
+    /**
+     * <p>Whether the camera device will trigger a precapture
+     * metering sequence when it processes this request.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This entry is normally set to IDLE, or is not
+     * included at all in the request settings. When included and
+     * set to START, the camera device will trigger the auto-exposure (AE)
+     * precapture metering sequence.</p>
+     * <p>When set to CANCEL, the camera device will cancel any active
+     * precapture metering trigger, and return to its initial AE state.
+     * If a precapture metering sequence is already completed, and the camera
+     * device has implicitly locked the AE for subsequent still capture, the
+     * CANCEL trigger will unlock the AE and return to its initial AE state.</p>
+     * <p>The precapture sequence should be triggered before starting a
+     * high-quality still capture for final metering decisions to
+     * be made, and for firing pre-capture flash pulses to estimate
+     * scene brightness and required final capture flash power, when
+     * the flash is enabled.</p>
+     * <p>Normally, this entry should be set to START for only a
+     * single request, and the application should wait until the
+     * sequence completes before starting a new one.</p>
+     * <p>When a precapture metering sequence is finished, the camera device
+     * may lock the auto-exposure routine internally to be able to accurately expose the
+     * subsequent still capture image (<code>ACAMERA_CONTROL_CAPTURE_INTENT == STILL_CAPTURE</code>).
+     * For this case, the AE may not resume normal scan if no subsequent still capture is
+     * submitted. To ensure that the AE routine restarts normal scan, the application should
+     * submit a request with <code>ACAMERA_CONTROL_AE_LOCK == true</code>, followed by a request
+     * with <code>ACAMERA_CONTROL_AE_LOCK == false</code>, if the application decides not to submit a
+     * still capture request after the precapture sequence completes. Alternatively, for
+     * API level 23 or newer devices, the CANCEL trigger can be used to unlock the AE that the
+     * camera device locked internally if the application doesn't submit a still capture request
+     * after the AE precapture trigger. Note that CANCEL was added in API level 23, and must not
+     * be used on devices with earlier API levels.</p>
+     * <p>The exact effect of auto-exposure (AE) precapture trigger
+     * depends on the current AE mode and state; see
+     * ACAMERA_CONTROL_AE_STATE for AE precapture state transition
+     * details.</p>
+     * <p>On LEGACY-level devices, the precapture trigger is not supported;
+     * capturing a high-resolution JPEG image will automatically trigger a
+     * precapture sequence before the high-resolution capture, including
+     * potentially firing a pre-capture flash.</p>
+     * <p>Using the precapture trigger and the auto-focus trigger ACAMERA_CONTROL_AF_TRIGGER
+     * simultaneously is allowed. However, since these triggers often require cooperation between
+     * the auto-focus and auto-exposure routines (for example, the flash may need to be enabled for a
+     * focus sweep), the camera device may delay acting on a later trigger until the previous
+     * trigger has been fully handled. This may lead to longer intervals between the trigger and
+     * changes to ACAMERA_CONTROL_AE_STATE indicating the start of the precapture sequence, for
+     * example.</p>
+     * <p>If both the precapture and the auto-focus trigger are activated on the same request, then
+     * the camera device will complete them in the optimal order for that device.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_LOCK
+     * @see ACAMERA_CONTROL_AE_STATE
+     * @see ACAMERA_CONTROL_AF_TRIGGER
+     * @see ACAMERA_CONTROL_CAPTURE_INTENT
+     */
+    ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER =                     // byte (enum)
+            ACAMERA_CONTROL_START + 6,
+    /**
+     * <p>Whether auto-focus (AF) is currently enabled, and what
+     * mode it is set to.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Only effective if ACAMERA_CONTROL_MODE = AUTO and the lens is not fixed focus
+     * (i.e. <code>ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE &gt; 0</code>). Also note that
+     * when ACAMERA_CONTROL_AE_MODE is OFF, the behavior of AF is device
+     * dependent. It is recommended to lock AF by using ACAMERA_CONTROL_AF_TRIGGER before
+     * setting ACAMERA_CONTROL_AE_MODE to OFF, or set AF mode to OFF when AE is OFF.</p>
+     * <p>If the lens is controlled by the camera device auto-focus algorithm,
+     * the camera device will report the current AF status in ACAMERA_CONTROL_AF_STATE
+     * in result metadata.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AF_STATE
+     * @see ACAMERA_CONTROL_AF_TRIGGER
+     * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE
+     */
+    ACAMERA_CONTROL_AF_MODE =                                   // byte (enum)
+            ACAMERA_CONTROL_START + 7,
+    /**
+     * <p>List of metering areas to use for auto-focus.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Not available if android.control.maxRegionsAf is 0.
+     * Otherwise will always be present.</p>
+     * <p>The maximum number of focus areas supported by the device is determined by the value
+     * of android.control.maxRegionsAf.</p>
+     * <p>The data representation is int[5 * area_count].
+     * Every five elements represent a metering region of (xmin, ymin, xmax, ymax, weight).
+     * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
+     * ymax.</p>
+     * <p>The coordinate system is based on the active pixel array,
+     * with (0,0) being the top-left pixel in the active pixel array, and
+     * (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the
+     * bottom-right pixel in the active pixel array.</p>
+     * <p>The weight must be within <code>[0, 1000]</code>, and represents a weight
+     * for every pixel in the area. This means that a large metering area
+     * with the same weight as a smaller area will have more effect in
+     * the metering result. Metering areas can partially overlap and the
+     * camera device will add the weights in the overlap region.</p>
+     * <p>The weights are relative to weights of other metering regions, so if only one region
+     * is used, all non-zero weights will have the same effect. A region with 0 weight is
+     * ignored.</p>
+     * <p>If all regions have 0 weight, then no specific metering area needs to be used by the
+     * camera device.</p>
+     * <p>If the metering region is outside the used ACAMERA_SCALER_CROP_REGION returned in
+     * capture result metadata, the camera device will ignore the sections outside the crop
+     * region and output only the intersection rectangle as the metering region in the result
+     * metadata. If the region is entirely outside the crop region, it will be ignored and
+     * not reported in the result metadata.</p>
+     *
+     * @see ACAMERA_SCALER_CROP_REGION
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_CONTROL_AF_REGIONS =                                // int32[5*area_count]
+            ACAMERA_CONTROL_START + 8,
+    /**
+     * <p>Whether the camera device will trigger autofocus for this request.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This entry is normally set to IDLE, or is not
+     * included at all in the request settings.</p>
+     * <p>When included and set to START, the camera device will trigger the
+     * autofocus algorithm. If autofocus is disabled, this trigger has no effect.</p>
+     * <p>When set to CANCEL, the camera device will cancel any active trigger,
+     * and return to its initial AF state.</p>
+     * <p>Generally, applications should set this entry to START or CANCEL for only a
+     * single capture, and then return it to IDLE (or not set at all). Specifying
+     * START for multiple captures in a row means restarting the AF operation over
+     * and over again.</p>
+     * <p>See ACAMERA_CONTROL_AF_STATE for what the trigger means for each AF mode.</p>
+     * <p>Using the autofocus trigger and the precapture trigger ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * simultaneously is allowed. However, since these triggers often require cooperation between
+     * the auto-focus and auto-exposure routines (for example, the flash may need to be enabled for a
+     * focus sweep), the camera device may delay acting on a later trigger until the previous
+     * trigger has been fully handled. This may lead to longer intervals between the trigger and
+     * changes to ACAMERA_CONTROL_AF_STATE, for example.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * @see ACAMERA_CONTROL_AF_STATE
+     */
+    ACAMERA_CONTROL_AF_TRIGGER =                                // byte (enum)
+            ACAMERA_CONTROL_START + 9,
+    /**
+     * <p>Whether auto-white balance (AWB) is currently locked to its
+     * latest calculated values.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>When set to <code>true</code> (ON), the AWB algorithm is locked to its latest parameters,
+     * and will not change color balance settings until the lock is set to <code>false</code> (OFF).</p>
+     * <p>Since the camera device has a pipeline of in-flight requests, the settings that
+     * get locked do not necessarily correspond to the settings that were present in the
+     * latest capture result received from the camera device, since additional captures
+     * and AWB updates may have occurred even before the result was sent out. If an
+     * application is switching between automatic and manual control and wishes to eliminate
+     * any flicker during the switch, the following procedure is recommended:</p>
+     * <ol>
+     * <li>Starting in auto-AWB mode:</li>
+     * <li>Lock AWB</li>
+     * <li>Wait for the first result to be output that has the AWB locked</li>
+     * <li>Copy AWB settings from that result into a request, set the request to manual AWB</li>
+     * <li>Submit the capture request, proceed to run manual AWB as desired.</li>
+     * </ol>
+     * <p>Note that AWB lock is only meaningful when
+     * ACAMERA_CONTROL_AWB_MODE is in the AUTO mode; in other modes,
+     * AWB is already fixed to a specific setting.</p>
+     * <p>Some LEGACY devices may not support ON; the value is then overridden to OFF.</p>
+     *
+     * @see ACAMERA_CONTROL_AWB_MODE
+     */
+    ACAMERA_CONTROL_AWB_LOCK =                                  // byte (enum)
+            ACAMERA_CONTROL_START + 10,
+    /**
+     * <p>Whether auto-white balance (AWB) is currently setting the color
+     * transform fields, and what its illumination target
+     * is.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This control is only effective if ACAMERA_CONTROL_MODE is AUTO.</p>
+     * <p>When set to the ON mode, the camera device's auto-white balance
+     * routine is enabled, overriding the application's selected
+     * ACAMERA_COLOR_CORRECTION_TRANSFORM, ACAMERA_COLOR_CORRECTION_GAINS and
+     * ACAMERA_COLOR_CORRECTION_MODE. Note that when ACAMERA_CONTROL_AE_MODE
+     * is OFF, the behavior of AWB is device dependent. It is recommended to
+     * also set AWB mode to OFF or lock AWB by using ACAMERA_CONTROL_AWB_LOCK before
+     * setting AE mode to OFF.</p>
+     * <p>When set to the OFF mode, the camera device's auto-white balance
+     * routine is disabled. The application manually controls the white
+     * balance by ACAMERA_COLOR_CORRECTION_TRANSFORM, ACAMERA_COLOR_CORRECTION_GAINS
+     * and ACAMERA_COLOR_CORRECTION_MODE.</p>
+     * <p>When set to any other modes, the camera device's auto-white
+     * balance routine is disabled. The camera device uses each
+     * particular illumination target for white balance
+     * adjustment. The application's values for
+     * ACAMERA_COLOR_CORRECTION_TRANSFORM,
+     * ACAMERA_COLOR_CORRECTION_GAINS and
+     * ACAMERA_COLOR_CORRECTION_MODE are ignored.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_MODE
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AWB_LOCK
+     * @see ACAMERA_CONTROL_MODE
+     */
+    ACAMERA_CONTROL_AWB_MODE =                                  // byte (enum)
+            ACAMERA_CONTROL_START + 11,
+    /**
+     * <p>List of metering areas to use for auto-white-balance illuminant
+     * estimation.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Not available if android.control.maxRegionsAwb is 0.
+     * Otherwise will always be present.</p>
+     * <p>The maximum number of regions supported by the device is determined by the value
+     * of android.control.maxRegionsAwb.</p>
+     * <p>The data representation is int[5 * area_count].
+     * Every five elements represent a metering region of (xmin, ymin, xmax, ymax, weight).
+     * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
+     * ymax.</p>
+     * <p>The coordinate system is based on the active pixel array,
+     * with (0,0) being the top-left pixel in the active pixel array, and
+     * (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.height - 1) being the
+     * bottom-right pixel in the active pixel array.</p>
+     * <p>The weight must be within <code>[0, 1000]</code>, and represents a weight
+     * for every pixel in the area. This means that a large metering area
+     * with the same weight as a smaller area will have more effect in
+     * the metering result. Metering areas can partially overlap and the
+     * camera device will add the weights in the overlap region.</p>
+     * <p>The weights are relative to weights of other white balance metering regions, so if
+     * only one region is used, all non-zero weights will have the same effect. A region with
+     * 0 weight is ignored.</p>
+     * <p>If all regions have 0 weight, then no specific metering area needs to be used by the
+     * camera device.</p>
+     * <p>If the metering region is outside the used ACAMERA_SCALER_CROP_REGION returned in
+     * capture result metadata, the camera device will ignore the sections outside the crop
+     * region and output only the intersection rectangle as the metering region in the result
+     * metadata.  If the region is entirely outside the crop region, it will be ignored and
+     * not reported in the result metadata.</p>
+     *
+     * @see ACAMERA_SCALER_CROP_REGION
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_CONTROL_AWB_REGIONS =                               // int32[5*area_count]
+            ACAMERA_CONTROL_START + 12,
+    /**
+     * <p>Information to the camera device 3A (auto-exposure,
+     * auto-focus, auto-white balance) routines about the purpose
+     * of this capture, to help the camera device to decide optimal 3A
+     * strategy.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This control (except for MANUAL) is only effective if
+     * <code>ACAMERA_CONTROL_MODE != OFF</code> and any 3A routine is active.</p>
+     * <p>ZERO_SHUTTER_LAG will be supported if ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     * contains PRIVATE_REPROCESSING or YUV_REPROCESSING. MANUAL will be supported if
+     * ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains MANUAL_SENSOR. Other intent values are
+     * always supported.</p>
+     *
+     * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     */
+    ACAMERA_CONTROL_CAPTURE_INTENT =                            // byte (enum)
+            ACAMERA_CONTROL_START + 13,
+    /**
+     * <p>A special color effect to apply.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>When this mode is set, a color effect will be applied
+     * to images produced by the camera device. The interpretation
+     * and implementation of these color effects is left to the
+     * implementor of the camera device, and should not be
+     * depended on to be consistent (or present) across all
+     * devices.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE =                               // byte (enum)
+            ACAMERA_CONTROL_START + 14,
+    /**
+     * <p>Overall mode of 3A (auto-exposure, auto-white-balance, auto-focus) control
+     * routines.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This is a top-level 3A control switch. When set to OFF, all 3A control
+     * by the camera device is disabled. The application must set the fields for
+     * capture parameters itself.</p>
+     * <p>When set to AUTO, the individual algorithm controls in
+     * ACAMERA_CONTROL_* are in effect, such as ACAMERA_CONTROL_AF_MODE.</p>
+     * <p>When set to USE_SCENE_MODE, the individual controls in
+     * ACAMERA_CONTROL_* are mostly disabled, and the camera device implements
+     * one of the scene mode settings (such as ACTION, SUNSET, or PARTY)
+     * as it wishes. The camera device scene mode 3A settings are provided by
+     * capture results {@link ACameraMetadata} from
+     * {@link ACameraCaptureSession_captureCallback_result}.</p>
+     * <p>OFF_KEEP_STATE is similar to OFF mode; the only difference
+     * is that this frame will not be used by the camera device's background 3A statistics
+     * update, as if this frame were never captured. This mode can be used in the scenario
+     * where the application doesn't want a 3A manual control capture to affect
+     * the subsequent auto 3A capture results.</p>
+     *
+     * @see ACAMERA_CONTROL_AF_MODE
+     */
+    ACAMERA_CONTROL_MODE =                                      // byte (enum)
+            ACAMERA_CONTROL_START + 15,
+    /**
+     * <p>Control for which scene mode is currently active.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Scene modes are custom camera modes optimized for a certain set of conditions and
+     * capture settings.</p>
+     * <p>This is the mode that is active when
+     * <code>ACAMERA_CONTROL_MODE == USE_SCENE_MODE</code>. Aside from FACE_PRIORITY, these modes will
+     * disable ACAMERA_CONTROL_AE_MODE, ACAMERA_CONTROL_AWB_MODE, and ACAMERA_CONTROL_AF_MODE
+     * while in use.</p>
+     * <p>The interpretation and implementation of these scene modes is left
+     * to the implementor of the camera device. Their behavior will not be
+     * consistent across all devices, and any given device may only implement
+     * a subset of these modes.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AF_MODE
+     * @see ACAMERA_CONTROL_AWB_MODE
+     * @see ACAMERA_CONTROL_MODE
+     */
+    ACAMERA_CONTROL_SCENE_MODE =                                // byte (enum)
+            ACAMERA_CONTROL_START + 16,
+    /**
+     * <p>Whether video stabilization is
+     * active.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Video stabilization automatically warps images from
+     * the camera in order to stabilize motion between consecutive frames.</p>
+     * <p>If enabled, video stabilization can modify the
+     * ACAMERA_SCALER_CROP_REGION to keep the video stream stabilized.</p>
+     * <p>Switching between different video stabilization modes may take several
+     * frames to initialize; the camera device will report the current mode
+     * in capture result metadata. For example, when "ON" mode is requested,
+     * the video stabilization mode in the first several capture results may
+     * still be "OFF", and it will become "ON" when initialization is
+     * complete.</p>
+     * <p>In addition, not all recording sizes or frame rates may be supported for
+     * stabilization by a device that reports stabilization support. It is guaranteed
+     * that an output targeting a MediaRecorder or MediaCodec will be stabilized if
+     * the recording resolution is less than or equal to 1920 x 1080 (width less than
+     * or equal to 1920, height less than or equal to 1080), and the recording
+     * frame rate is less than or equal to 30fps.  At other sizes, the CaptureResult
+     * ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE field will return
+     * OFF if the recording output is not stabilized, or if there are no output
+     * Surface types that can be stabilized.</p>
+     * <p>If a camera device supports both this mode and OIS
+     * (ACAMERA_LENS_OPTICAL_STABILIZATION_MODE), turning both modes on may
+     * produce undesirable interaction, so it is recommended not to enable
+     * both at the same time.</p>
+     *
+     * @see ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
+     * @see ACAMERA_LENS_OPTICAL_STABILIZATION_MODE
+     * @see ACAMERA_SCALER_CROP_REGION
+     */
+    ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE =                  // byte (enum)
+            ACAMERA_CONTROL_START + 17,
+    /**
+     * <p>List of auto-exposure antibanding modes for ACAMERA_CONTROL_AE_ANTIBANDING_MODE that are
+     * supported by this camera device.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_ANTIBANDING_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Not all of the auto-exposure anti-banding modes may be
+     * supported by a given camera device. This field lists the
+     * valid anti-banding modes that the application may request
+     * for this camera device with the
+     * ACAMERA_CONTROL_AE_ANTIBANDING_MODE control.</p>
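+     *
+     * <p>For example (a sketch, assuming <code>chars</code> is an ACameraMetadata*
+     * obtained from ACameraManager_getCameraCharacteristics, with error handling
+     * omitted), an application might check whether the 50Hz mode is listed:</p>
+     * <pre><code>ACameraMetadata_const_entry entry;
+     * if (ACameraMetadata_getConstEntry(chars,
+     *         ACAMERA_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, &amp;entry) == ACAMERA_OK) {
+     *     for (uint32_t i = 0; i &lt; entry.count; i++) {
+     *         if (entry.data.u8[i] == ACAMERA_CONTROL_AE_ANTIBANDING_MODE_50HZ) {
+     *             // 50Hz antibanding may be requested for this device
+     *         }
+     *     }
+     * }
+     * </code></pre>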
+     *
+     * @see ACAMERA_CONTROL_AE_ANTIBANDING_MODE
+     */
+    ACAMERA_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES =            // byte[n]
+            ACAMERA_CONTROL_START + 18,
+    /**
+     * <p>List of auto-exposure modes for ACAMERA_CONTROL_AE_MODE that are supported by this camera
+     * device.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Not all the auto-exposure modes may be supported by a
+     * given camera device, especially if no flash unit is
+     * available. This entry lists the valid modes for
+     * ACAMERA_CONTROL_AE_MODE for this camera device.</p>
+     * <p>All camera devices support ON, and all camera devices with flash
+     * units support ON_AUTO_FLASH and ON_ALWAYS_FLASH.</p>
+     * <p>FULL mode camera devices always support OFF mode,
+     * which enables application control of camera exposure time,
+     * sensitivity, and frame duration.</p>
+     * <p>LEGACY mode camera devices never support OFF mode.
+     * LIMITED mode devices support OFF if they support the MANUAL_SENSOR
+     * capability.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     */
+    ACAMERA_CONTROL_AE_AVAILABLE_MODES =                        // byte[n]
+            ACAMERA_CONTROL_START + 19,
+    /**
+     * <p>List of frame rate ranges for ACAMERA_CONTROL_AE_TARGET_FPS_RANGE supported by
+     * this camera device.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_TARGET_FPS_RANGE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>For devices at the LEGACY level or above:</p>
+     * <ul>
+     * <li>
+     * <p>For constant-framerate recording, for each normal
+     * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a>, that is, a
+     * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a> that has
+     * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#quality">quality</a>
+     * in the range [
+     * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#QUALITY_LOW">QUALITY_LOW</a>,
+     * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#QUALITY_2160P">QUALITY_2160P</a>],
+     * if the profile is supported by the device and has
+     * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#videoFrameRate">videoFrameRate</a>
+     * <code>x</code>, this list will always include (<code>x</code>,<code>x</code>).</p>
+     * </li>
+     * <li>
+     * <p>Also, a camera device must either not support any
+     * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a>,
+     * or support at least one
+     * normal <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a>
+     * that has
+     * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#videoFrameRate">videoFrameRate</a> <code>x</code> &gt;= 24.</p>
+     * </li>
+     * </ul>
+     * <p>For devices at the LIMITED level or above:</p>
+     * <ul>
+     * <li>For YUV_420_888 burst capture use case, this list will always include (<code>min</code>, <code>max</code>)
+     * and (<code>max</code>, <code>max</code>) where <code>min</code> &lt;= 15 and <code>max</code> = the maximum output frame rate of the
+     * maximum YUV_420_888 output size.</li>
+     * </ul>
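+     * <p>As an illustrative sketch (assuming <code>chars</code> holds this camera's
+     * characteristics), the ranges are stored as flattened (min, max) pairs:</p>
+     * <pre><code>ACameraMetadata_const_entry entry;
+     * if (ACameraMetadata_getConstEntry(chars,
+     *         ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, &amp;entry) == ACAMERA_OK) {
+     *     for (uint32_t i = 0; i + 1 &lt; entry.count; i += 2) {
+     *         int32_t minFps = entry.data.i32[i];
+     *         int32_t maxFps = entry.data.i32[i + 1];
+     *         // e.g. prefer (30, 30) for constant-framerate recording
+     *     }
+     * }
+     * </code></pre>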
+     */
+    ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES =            // int32[2*n]
+            ACAMERA_CONTROL_START + 20,
+    /**
+     * <p>Maximum and minimum exposure compensation values for
+     * ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION, in counts of ACAMERA_CONTROL_AE_COMPENSATION_STEP,
+     * that are supported by this camera device.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_COMPENSATION_STEP
+     * @see ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>None</p>
+     */
+    ACAMERA_CONTROL_AE_COMPENSATION_RANGE =                     // int32[2]
+            ACAMERA_CONTROL_START + 21,
+    /**
+     * <p>Smallest step by which the exposure compensation
+     * can be changed.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This is the unit for ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION. For example, if this key has
+     * a value of <code>1/2</code>, then a setting of <code>-2</code> for ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION means
+     * that the target EV offset for the auto-exposure routine is -1 EV.</p>
+     * <p>One unit of EV compensation changes the brightness of the captured image by a factor
+     * of two. +1 EV doubles the image brightness, while -1 EV halves the image brightness.</p>
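+     *
+     * <p>For example (a sketch; <code>chars</code> and <code>request</code> are assumed
+     * valid, and rounding is left to the application), a -1 EV offset can be converted
+     * into compensation units:</p>
+     * <pre><code>ACameraMetadata_const_entry step;
+     * ACameraMetadata_getConstEntry(chars, ACAMERA_CONTROL_AE_COMPENSATION_STEP, &amp;step);
+     * double stepEv = (double) step.data.r[0].numerator / step.data.r[0].denominator;
+     * int32_t compensation = (int32_t) (-1.0 / stepEv); // -2 when the step is 1/2
+     * ACaptureRequest_setEntry_i32(request, ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION,
+     *         1, &amp;compensation);
+     * </code></pre>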
+     *
+     * @see ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION
+     */
+    ACAMERA_CONTROL_AE_COMPENSATION_STEP =                      // rational
+            ACAMERA_CONTROL_START + 22,
+    /**
+     * <p>List of auto-focus (AF) modes for ACAMERA_CONTROL_AF_MODE that are
+     * supported by this camera device.</p>
+     *
+     * @see ACAMERA_CONTROL_AF_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Not all the auto-focus modes may be supported by a
+     * given camera device. This entry lists the valid modes for
+     * ACAMERA_CONTROL_AF_MODE for this camera device.</p>
+     * <p>All LIMITED and FULL mode camera devices will support OFF mode, and all
+     * camera devices with adjustable focuser units
+     * (<code>ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE &gt; 0</code>) will support AUTO mode.</p>
+     * <p>LEGACY devices will support OFF mode only if they support
+     * focusing to infinity (by also setting ACAMERA_LENS_FOCUS_DISTANCE to
+     * <code>0.0f</code>).</p>
+     *
+     * @see ACAMERA_CONTROL_AF_MODE
+     * @see ACAMERA_LENS_FOCUS_DISTANCE
+     * @see ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE
+     */
+    ACAMERA_CONTROL_AF_AVAILABLE_MODES =                        // byte[n]
+            ACAMERA_CONTROL_START + 23,
+    /**
+     * <p>List of color effects for ACAMERA_CONTROL_EFFECT_MODE that are supported by this camera
+     * device.</p>
+     *
+     * @see ACAMERA_CONTROL_EFFECT_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This list contains the color effect modes that can be applied to
+     * images produced by the camera device.
+     * Implementations are not expected to be consistent across all devices.
+     * If no color effect modes are available for a device, this will only list
+     * OFF.</p>
+     * <p>A color effect will only be applied if
+     * ACAMERA_CONTROL_MODE != OFF.  OFF is always included in this list.</p>
+     * <p>This control has no effect on the operation of other control routines such
+     * as auto-exposure, white balance, or focus.</p>
+     *
+     * @see ACAMERA_CONTROL_MODE
+     */
+    ACAMERA_CONTROL_AVAILABLE_EFFECTS =                         // byte[n]
+            ACAMERA_CONTROL_START + 24,
+    /**
+     * <p>List of scene modes for ACAMERA_CONTROL_SCENE_MODE that are supported by this camera
+     * device.</p>
+     *
+     * @see ACAMERA_CONTROL_SCENE_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This list contains scene modes that can be set for the camera device.
+     * Only scene modes that have been fully implemented for the
+     * camera device may be included here. Implementations are not expected
+     * to be consistent across all devices.</p>
+     * <p>If no scene modes are supported by the camera device, this
+     * will be set to DISABLED. Otherwise DISABLED will not be listed.</p>
+     * <p>FACE_PRIORITY is always listed if face detection is
+     * supported (i.e. <code>ACAMERA_STATISTICS_INFO_MAX_FACE_COUNT &gt;
+     * 0</code>).</p>
+     *
+     * @see ACAMERA_STATISTICS_INFO_MAX_FACE_COUNT
+     */
+    ACAMERA_CONTROL_AVAILABLE_SCENE_MODES =                     // byte[n]
+            ACAMERA_CONTROL_START + 25,
+    /**
+     * <p>List of video stabilization modes for ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
+     * that are supported by this camera device.</p>
+     *
+     * @see ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>OFF will always be listed.</p>
+     */
+    ACAMERA_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES =       // byte[n]
+            ACAMERA_CONTROL_START + 26,
+    /**
+     * <p>List of auto-white-balance modes for ACAMERA_CONTROL_AWB_MODE that are supported by this
+     * camera device.</p>
+     *
+     * @see ACAMERA_CONTROL_AWB_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Not all the auto-white-balance modes may be supported by a
+     * given camera device. This entry lists the valid modes for
+     * ACAMERA_CONTROL_AWB_MODE for this camera device.</p>
+     * <p>All camera devices will support ON mode.</p>
+     * <p>Camera devices that support the MANUAL_POST_PROCESSING capability will always support OFF
+     * mode, which enables application control of white balance, by using
+     * ACAMERA_COLOR_CORRECTION_TRANSFORM and ACAMERA_COLOR_CORRECTION_GAINS
+     * (ACAMERA_COLOR_CORRECTION_MODE must be set to TRANSFORM_MATRIX). This includes all FULL
+     * mode camera devices.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_MODE
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * @see ACAMERA_CONTROL_AWB_MODE
+     */
+    ACAMERA_CONTROL_AWB_AVAILABLE_MODES =                       // byte[n]
+            ACAMERA_CONTROL_START + 27,
+    /**
+     * <p>List of the maximum number of regions that can be used for metering in
+     * auto-exposure (AE), auto-white balance (AWB), and auto-focus (AF);
+     * this corresponds to the maximum number of elements in
+     * ACAMERA_CONTROL_AE_REGIONS, ACAMERA_CONTROL_AWB_REGIONS,
+     * and ACAMERA_CONTROL_AF_REGIONS.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_REGIONS
+     * @see ACAMERA_CONTROL_AF_REGIONS
+     * @see ACAMERA_CONTROL_AWB_REGIONS
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>None</p>
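+     * <p>For illustration (a sketch assuming <code>chars</code> holds this camera's
+     * characteristics), the three counts are laid out in AE, AWB, AF order:</p>
+     * <pre><code>ACameraMetadata_const_entry entry;
+     * if (ACameraMetadata_getConstEntry(chars, ACAMERA_CONTROL_MAX_REGIONS,
+     *         &amp;entry) == ACAMERA_OK) {
+     *     int32_t maxAeRegions  = entry.data.i32[0];
+     *     int32_t maxAwbRegions = entry.data.i32[1];
+     *     int32_t maxAfRegions  = entry.data.i32[2];
+     * }
+     * </code></pre>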
+     */
+    ACAMERA_CONTROL_MAX_REGIONS =                               // int32[3]
+            ACAMERA_CONTROL_START + 28,
+    /**
+     * <p>Current state of the auto-exposure (AE) algorithm.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Switching between or enabling AE modes (ACAMERA_CONTROL_AE_MODE) always
+     * resets the AE state to INACTIVE. Similarly, switching between ACAMERA_CONTROL_MODE,
+     * or ACAMERA_CONTROL_SCENE_MODE if <code>ACAMERA_CONTROL_MODE == USE_SCENE_MODE</code> resets all
+     * the algorithm states to INACTIVE.</p>
+     * <p>The camera device can do several state transitions between two results, if it is
+     * allowed by the state transition table. For example: INACTIVE may never actually be
+     * seen in a result.</p>
+     * <p>The state in the result is the state for this image (in sync with this image): if
+     * AE state becomes CONVERGED, then the image data associated with this result should
+     * be good to use.</p>
+     * <p>Below are state transition tables for different AE modes.</p>
+     * <p>When ACAMERA_CONTROL_AE_MODE is AE_MODE_OFF:</p>
+     * <p>State       | Transition Cause | New State | Notes
+     * :------------:|:----------------:|:---------:|:-----------------------:
+     * INACTIVE      |                  | INACTIVE  | Camera device auto exposure algorithm is disabled</p>
+     * <p>When ACAMERA_CONTROL_AE_MODE is AE_MODE_ON_*:</p>
+     * <p>State        | Transition Cause                             | New State      | Notes
+     * :-------------:|:--------------------------------------------:|:--------------:|:-----------------:
+     * INACTIVE       | Camera device initiates AE scan              | SEARCHING      | Values changing
+     * INACTIVE       | ACAMERA_CONTROL_AE_LOCK is ON                 | LOCKED         | Values locked
+     * SEARCHING      | Camera device finishes AE scan               | CONVERGED      | Good values, not changing
+     * SEARCHING      | Camera device finishes AE scan               | FLASH_REQUIRED | Converged but too dark w/o flash
+     * SEARCHING      | ACAMERA_CONTROL_AE_LOCK is ON                 | LOCKED         | Values locked
+     * CONVERGED      | Camera device initiates AE scan              | SEARCHING      | Values changing
+     * CONVERGED      | ACAMERA_CONTROL_AE_LOCK is ON                 | LOCKED         | Values locked
+     * FLASH_REQUIRED | Camera device initiates AE scan              | SEARCHING      | Values changing
+     * FLASH_REQUIRED | ACAMERA_CONTROL_AE_LOCK is ON                 | LOCKED         | Values locked
+     * LOCKED         | ACAMERA_CONTROL_AE_LOCK is OFF                | SEARCHING      | Values not good after unlock
+     * LOCKED         | ACAMERA_CONTROL_AE_LOCK is OFF                | CONVERGED      | Values good after unlock
+     * LOCKED         | ACAMERA_CONTROL_AE_LOCK is OFF                | FLASH_REQUIRED | Exposure good, but too dark
+     * PRECAPTURE     | Sequence done. ACAMERA_CONTROL_AE_LOCK is OFF | CONVERGED      | Ready for high-quality capture
+     * PRECAPTURE     | Sequence done. ACAMERA_CONTROL_AE_LOCK is ON  | LOCKED         | Ready for high-quality capture
+     * LOCKED         | aeLock is ON and aePrecaptureTrigger is START | LOCKED        | Precapture trigger is ignored when AE is already locked
+     * LOCKED         | aeLock is ON and aePrecaptureTrigger is CANCEL| LOCKED        | Precapture trigger is ignored when AE is already locked
+     * Any state (excluding LOCKED) | ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is START | PRECAPTURE     | Start AE precapture metering sequence
+     * Any state (excluding LOCKED) | ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is CANCEL| INACTIVE       | Currently active precapture metering sequence is canceled</p>
+     * <p>For the above table, the camera device may skip reporting any state changes that happen
+     * without application intervention (i.e. mode switch, trigger, locking). Any state that
+     * can be skipped in that manner is called a transient state.</p>
+     * <p>For example, for above AE modes (AE_MODE_ON_*), in addition to the state transitions
+     * listed in above table, it is also legal for the camera device to skip one or more
+     * transient states between two results. See below table for examples:</p>
+     * <p>State        | Transition Cause                                            | New State      | Notes
+     * :-------------:|:-----------------------------------------------------------:|:--------------:|:-----------------:
+     * INACTIVE       | Camera device finished AE scan                              | CONVERGED      | Values are already good, transient states are skipped by camera device.
+     * Any state (excluding LOCKED) | ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is START, sequence done | FLASH_REQUIRED | Converged but too dark w/o flash after a precapture sequence, transient states are skipped by camera device.
+     * Any state (excluding LOCKED) | ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is START, sequence done | CONVERGED      | Converged after a precapture sequence, transient states are skipped by camera device.
+     * Any state (excluding LOCKED) | ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is CANCEL, converged    | FLASH_REQUIRED | Converged but too dark w/o flash after a precapture sequence is canceled, transient states are skipped by camera device.
+     * Any state (excluding LOCKED) | ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is CANCEL, converged    | CONVERGED      | Converged after a precapture sequence is canceled, transient states are skipped by camera device.
+     * CONVERGED      | Camera device finished AE scan                              | FLASH_REQUIRED | Converged but too dark w/o flash after a new scan, transient states are skipped by camera device.
+     * FLASH_REQUIRED | Camera device finished AE scan                              | CONVERGED      | Converged after a new scan, transient states are skipped by camera device.</p>
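+     *
+     * <p>As a minimal sketch (the callback name is hypothetical; error handling omitted),
+     * the AE state can be read from each capture result:</p>
+     * <pre><code>static void onCaptureCompleted(void* ctx, ACameraCaptureSession* session,
+     *         ACaptureRequest* request, const ACameraMetadata* result) {
+     *     ACameraMetadata_const_entry entry;
+     *     if (ACameraMetadata_getConstEntry(result, ACAMERA_CONTROL_AE_STATE,
+     *             &amp;entry) == ACAMERA_OK &amp;&amp;
+     *             entry.data.u8[0] == ACAMERA_CONTROL_AE_STATE_CONVERGED) {
+     *         // exposure is stable; the image for this result should be well exposed
+     *     }
+     * }
+     * </code></pre>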
+     *
+     * @see ACAMERA_CONTROL_AE_LOCK
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_CONTROL_SCENE_MODE
+     */
+    ACAMERA_CONTROL_AE_STATE =                                  // byte (enum)
+            ACAMERA_CONTROL_START + 31,
+    /**
+     * <p>Current state of auto-focus (AF) algorithm.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Switching between or enabling AF modes (ACAMERA_CONTROL_AF_MODE) always
+     * resets the AF state to INACTIVE. Similarly, switching between ACAMERA_CONTROL_MODE,
+     * or ACAMERA_CONTROL_SCENE_MODE if <code>ACAMERA_CONTROL_MODE == USE_SCENE_MODE</code> resets all
+     * the algorithm states to INACTIVE.</p>
+     * <p>The camera device can do several state transitions between two results, if it is
+     * allowed by the state transition table. For example: INACTIVE may never actually be
+     * seen in a result.</p>
+     * <p>The state in the result is the state for this image (in sync with this image): if
+     * AF state becomes FOCUSED, then the image data associated with this result should
+     * be sharp.</p>
+     * <p>Below are state transition tables for different AF modes.</p>
+     * <p>When ACAMERA_CONTROL_AF_MODE is AF_MODE_OFF or AF_MODE_EDOF:</p>
+     * <p>State       | Transition Cause | New State | Notes
+     * :------------:|:----------------:|:---------:|:-----------:
+     * INACTIVE      |                  | INACTIVE  | Never changes</p>
+     * <p>When ACAMERA_CONTROL_AF_MODE is AF_MODE_AUTO or AF_MODE_MACRO:</p>
+     * <p>State            | Transition Cause | New State          | Notes
+     * :-----------------:|:----------------:|:------------------:|:--------------:
+     * INACTIVE           | AF_TRIGGER       | ACTIVE_SCAN        | Start AF sweep, Lens now moving
+     * ACTIVE_SCAN        | AF sweep done    | FOCUSED_LOCKED     | Focused, Lens now locked
+     * ACTIVE_SCAN        | AF sweep done    | NOT_FOCUSED_LOCKED | Not focused, Lens now locked
+     * ACTIVE_SCAN        | AF_CANCEL        | INACTIVE           | Cancel/reset AF, Lens now locked
+     * FOCUSED_LOCKED     | AF_CANCEL        | INACTIVE           | Cancel/reset AF
+     * FOCUSED_LOCKED     | AF_TRIGGER       | ACTIVE_SCAN        | Start new sweep, Lens now moving
+     * NOT_FOCUSED_LOCKED | AF_CANCEL        | INACTIVE           | Cancel/reset AF
+     * NOT_FOCUSED_LOCKED | AF_TRIGGER       | ACTIVE_SCAN        | Start new sweep, Lens now moving
+     * Any state          | Mode change      | INACTIVE           |</p>
+     * <p>For the above table, the camera device may skip reporting any state changes that happen
+     * without application intervention (i.e. mode switch, trigger, locking). Any state that
+     * can be skipped in that manner is called a transient state.</p>
+     * <p>For example, for these AF modes (AF_MODE_AUTO and AF_MODE_MACRO), in addition to the
+     * state transitions listed in above table, it is also legal for the camera device to skip
+     * one or more transient states between two results. See below table for examples:</p>
+     * <p>State            | Transition Cause | New State          | Notes
+     * :-----------------:|:----------------:|:------------------:|:--------------:
+     * INACTIVE           | AF_TRIGGER       | FOCUSED_LOCKED     | Focus is already good or good after a scan, lens is now locked.
+     * INACTIVE           | AF_TRIGGER       | NOT_FOCUSED_LOCKED | Focus failed after a scan, lens is now locked.
+     * FOCUSED_LOCKED     | AF_TRIGGER       | FOCUSED_LOCKED     | Focus is already good or good after a scan, lens is now locked.
+     * NOT_FOCUSED_LOCKED | AF_TRIGGER       | FOCUSED_LOCKED     | Focus is good after a scan, lens is now locked.</p>
+     * <p>When ACAMERA_CONTROL_AF_MODE is AF_MODE_CONTINUOUS_VIDEO:</p>
+     * <p>State            | Transition Cause                    | New State          | Notes
+     * :-----------------:|:-----------------------------------:|:------------------:|:--------------:
+     * INACTIVE           | Camera device initiates new scan    | PASSIVE_SCAN       | Start AF scan, Lens now moving
+     * INACTIVE           | AF_TRIGGER                          | NOT_FOCUSED_LOCKED | AF state query, Lens now locked
+     * PASSIVE_SCAN       | Camera device completes current scan| PASSIVE_FOCUSED    | End AF scan, Lens now locked
+     * PASSIVE_SCAN       | Camera device fails current scan    | PASSIVE_UNFOCUSED  | End AF scan, Lens now locked
+     * PASSIVE_SCAN       | AF_TRIGGER                          | FOCUSED_LOCKED     | Immediate transition, if focus is good. Lens now locked
+     * PASSIVE_SCAN       | AF_TRIGGER                          | NOT_FOCUSED_LOCKED | Immediate transition, if focus is bad. Lens now locked
+     * PASSIVE_SCAN       | AF_CANCEL                           | INACTIVE           | Reset lens position, Lens now locked
+     * PASSIVE_FOCUSED    | Camera device initiates new scan    | PASSIVE_SCAN       | Start AF scan, Lens now moving
+     * PASSIVE_UNFOCUSED  | Camera device initiates new scan    | PASSIVE_SCAN       | Start AF scan, Lens now moving
+     * PASSIVE_FOCUSED    | AF_TRIGGER                          | FOCUSED_LOCKED     | Immediate transition, lens now locked
+     * PASSIVE_UNFOCUSED  | AF_TRIGGER                          | NOT_FOCUSED_LOCKED | Immediate transition, lens now locked
+     * FOCUSED_LOCKED     | AF_TRIGGER                          | FOCUSED_LOCKED     | No effect
+     * FOCUSED_LOCKED     | AF_CANCEL                           | INACTIVE           | Restart AF scan
+     * NOT_FOCUSED_LOCKED | AF_TRIGGER                          | NOT_FOCUSED_LOCKED | No effect
+     * NOT_FOCUSED_LOCKED | AF_CANCEL                           | INACTIVE           | Restart AF scan</p>
+     * <p>When ACAMERA_CONTROL_AF_MODE is AF_MODE_CONTINUOUS_PICTURE:</p>
+     * <p>State            | Transition Cause                     | New State          | Notes
+     * :-----------------:|:------------------------------------:|:------------------:|:--------------:
+     * INACTIVE           | Camera device initiates new scan     | PASSIVE_SCAN       | Start AF scan, Lens now moving
+     * INACTIVE           | AF_TRIGGER                           | NOT_FOCUSED_LOCKED | AF state query, Lens now locked
+     * PASSIVE_SCAN       | Camera device completes current scan | PASSIVE_FOCUSED    | End AF scan, Lens now locked
+     * PASSIVE_SCAN       | Camera device fails current scan     | PASSIVE_UNFOCUSED  | End AF scan, Lens now locked
+     * PASSIVE_SCAN       | AF_TRIGGER                           | FOCUSED_LOCKED     | Eventual transition once the focus is good. Lens now locked
+     * PASSIVE_SCAN       | AF_TRIGGER                           | NOT_FOCUSED_LOCKED | Eventual transition if cannot find focus. Lens now locked
+     * PASSIVE_SCAN       | AF_CANCEL                            | INACTIVE           | Reset lens position, Lens now locked
+     * PASSIVE_FOCUSED    | Camera device initiates new scan     | PASSIVE_SCAN       | Start AF scan, Lens now moving
+     * PASSIVE_UNFOCUSED  | Camera device initiates new scan     | PASSIVE_SCAN       | Start AF scan, Lens now moving
+     * PASSIVE_FOCUSED    | AF_TRIGGER                           | FOCUSED_LOCKED     | Immediate trans. Lens now locked
+     * PASSIVE_UNFOCUSED  | AF_TRIGGER                           | NOT_FOCUSED_LOCKED | Immediate trans. Lens now locked
+     * FOCUSED_LOCKED     | AF_TRIGGER                           | FOCUSED_LOCKED     | No effect
+     * FOCUSED_LOCKED     | AF_CANCEL                            | INACTIVE           | Restart AF scan
+     * NOT_FOCUSED_LOCKED | AF_TRIGGER                           | NOT_FOCUSED_LOCKED | No effect
+     * NOT_FOCUSED_LOCKED | AF_CANCEL                            | INACTIVE           | Restart AF scan</p>
+     * <p>When switching between AF_MODE_CONTINUOUS_* (CAF modes) and AF_MODE_AUTO/AF_MODE_MACRO
+     * (AUTO modes), the initial INACTIVE or PASSIVE_SCAN states may be skipped by the
+     * camera device. When a trigger is included in a mode switch request, the trigger
+     * will be evaluated in the context of the new mode in the request.
+     * See below table for examples:</p>
+     * <p>State      | Transition Cause                       | New State                                | Notes
+     * :-----------:|:--------------------------------------:|:----------------------------------------:|:--------------:
+     * any state    | CAF--&gt;AUTO mode switch                 | INACTIVE                                 | Mode switch without trigger, initial state must be INACTIVE
+     * any state    | CAF--&gt;AUTO mode switch with AF_TRIGGER | trigger-reachable states from INACTIVE   | Mode switch with trigger, INACTIVE is skipped
+     * any state    | AUTO--&gt;CAF mode switch                 | passively reachable states from INACTIVE | Mode switch without trigger, passive transient state is skipped</p>
+     *
+     * @see ACAMERA_CONTROL_AF_MODE
+     * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_CONTROL_SCENE_MODE
+     */
+    ACAMERA_CONTROL_AF_STATE =                                  // byte (enum)
+            ACAMERA_CONTROL_START + 32,
+    /**
+     * <p>Current state of auto-white balance (AWB) algorithm.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Switching between or enabling AWB modes (ACAMERA_CONTROL_AWB_MODE) always
+     * resets the AWB state to INACTIVE. Similarly, switching between ACAMERA_CONTROL_MODE,
+     * or ACAMERA_CONTROL_SCENE_MODE if <code>ACAMERA_CONTROL_MODE == USE_SCENE_MODE</code> resets all
+     * the algorithm states to INACTIVE.</p>
+     * <p>The camera device can do several state transitions between two results, if it is
+     * allowed by the state transition table. So INACTIVE may never actually be seen in
+     * a result.</p>
+     * <p>The state in the result is the state for this image (in sync with this image): if
+     * AWB state becomes CONVERGED, then the image data associated with this result should
+     * be good to use.</p>
+     * <p>Below are state transition tables for different AWB modes.</p>
+     * <p>When <code>ACAMERA_CONTROL_AWB_MODE != AWB_MODE_AUTO</code>:</p>
+     * <p>State       | Transition Cause | New State | Notes
+     * :------------:|:----------------:|:---------:|:-----------------------:
+     * INACTIVE      |                  |INACTIVE   |Camera device auto white balance algorithm is disabled</p>
+     * <p>When ACAMERA_CONTROL_AWB_MODE is AWB_MODE_AUTO:</p>
+     * <p>State        | Transition Cause                 | New State     | Notes
+     * :-------------:|:--------------------------------:|:-------------:|:-----------------:
+     * INACTIVE       | Camera device initiates AWB scan | SEARCHING     | Values changing
+     * INACTIVE       | ACAMERA_CONTROL_AWB_LOCK is ON    | LOCKED        | Values locked
+     * SEARCHING      | Camera device finishes AWB scan  | CONVERGED     | Good values, not changing
+     * SEARCHING      | ACAMERA_CONTROL_AWB_LOCK is ON    | LOCKED        | Values locked
+     * CONVERGED      | Camera device initiates AWB scan | SEARCHING     | Values changing
+     * CONVERGED      | ACAMERA_CONTROL_AWB_LOCK is ON    | LOCKED        | Values locked
+     * LOCKED         | ACAMERA_CONTROL_AWB_LOCK is OFF   | SEARCHING     | Values not good after unlock</p>
+     * <p>For the above table, the camera device may skip reporting any state changes that happen
+     * without application intervention (i.e. mode switch, trigger, locking). Any state that
+     * can be skipped in that manner is called a transient state.</p>
+     * <p>For example, for this AWB mode (AWB_MODE_AUTO), in addition to the state transitions
+     * listed in above table, it is also legal for the camera device to skip one or more
+     * transient states between two results. See below table for examples:</p>
+     * <p>State        | Transition Cause                 | New State     | Notes
+     * :-------------:|:--------------------------------:|:-------------:|:-----------------:
+     * INACTIVE       | Camera device finished AWB scan  | CONVERGED     | Values are already good, transient states are skipped by camera device.
+     * LOCKED         | ACAMERA_CONTROL_AWB_LOCK is OFF   | CONVERGED     | Values good after unlock, transient states are skipped by camera device.</p>
+     *
+     * @see ACAMERA_CONTROL_AWB_LOCK
+     * @see ACAMERA_CONTROL_AWB_MODE
+     * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_CONTROL_SCENE_MODE
+     */
+    ACAMERA_CONTROL_AWB_STATE =                                 // byte (enum)
+            ACAMERA_CONTROL_START + 34,
+    /**
+     * <p>Whether the camera device supports ACAMERA_CONTROL_AE_LOCK</p>
+     *
+     * @see ACAMERA_CONTROL_AE_LOCK
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Devices with MANUAL_SENSOR capability or BURST_CAPTURE capability will always
+     * list <code>true</code>. This includes FULL devices.</p>
+     */
+    ACAMERA_CONTROL_AE_LOCK_AVAILABLE =                         // byte (enum)
+            ACAMERA_CONTROL_START + 36,
+    /**
+     * <p>Whether the camera device supports ACAMERA_CONTROL_AWB_LOCK</p>
+     *
+     * @see ACAMERA_CONTROL_AWB_LOCK
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Devices with MANUAL_POST_PROCESSING capability or BURST_CAPTURE capability will
+     * always list <code>true</code>. This includes FULL devices.</p>
+     */
+    ACAMERA_CONTROL_AWB_LOCK_AVAILABLE =                        // byte (enum)
+            ACAMERA_CONTROL_START + 37,
+    /**
+     * <p>List of control modes for ACAMERA_CONTROL_MODE that are supported by this camera
+     * device.</p>
+     *
+     * @see ACAMERA_CONTROL_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This list contains control modes that can be set for the camera device.
+     * LEGACY mode devices will always support AUTO mode. LIMITED and FULL
+     * devices will always support OFF and AUTO modes.</p>
+     */
+    ACAMERA_CONTROL_AVAILABLE_MODES =                           // byte[n]
+            ACAMERA_CONTROL_START + 38,
+    /**
+     * <p>Range of boosts for ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST supported
+     * by this camera device.</p>
+     *
+     * @see ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Devices that support post RAW sensitivity boost will advertise the
+     * ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST key for controlling
+     * post RAW sensitivity boost.</p>
+     * <p>This key will be <code>null</code> for devices that do not support any RAW format
+     * outputs. For devices that do support RAW format outputs, this key will always
+     * be present, and if a device does not support post RAW sensitivity boost, it will
+     * list <code>(100, 100)</code> in this key.</p>
+     *
+     * @see ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST
+     */
+    ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE =          // int32[2]
+            ACAMERA_CONTROL_START + 39,
+    /**
+     * <p>The amount of additional sensitivity boost applied to output images
+     * after RAW sensor data is captured.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Some camera devices support additional digital sensitivity boosting in the
+     * camera processing pipeline after sensor RAW image is captured.
+     * Such a boost will be applied to YUV/JPEG format output images but will not
+     * have effect on RAW output formats like RAW_SENSOR, RAW10, RAW12 or RAW_OPAQUE.</p>
+     * <p>This key will be <code>null</code> for devices that do not support any RAW format
+     * outputs. For devices that do support RAW format outputs, this key will always
+     * be present, and if a device does not support post RAW sensitivity boost, it will
+     * list <code>100</code> in this key.</p>
+     * <p>If the camera device cannot apply the exact boost requested, it will reduce the
+     * boost to the nearest supported value.
+     * The final boost value used will be available in the output capture result.</p>
+     * <p>For devices that support post RAW sensitivity boost, the YUV/JPEG output images
+     * of such a device will have a total sensitivity of
+     * <code>ACAMERA_SENSOR_SENSITIVITY * ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST / 100</code>.
+     * The sensitivity of RAW format images will always be <code>ACAMERA_SENSOR_SENSITIVITY</code>.</p>
+     * <p>This control is only effective if ACAMERA_CONTROL_AE_MODE or ACAMERA_CONTROL_MODE is set to
+     * OFF; otherwise the auto-exposure algorithm will override this value.</p>
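+     *
+     * <p>A minimal sketch (assuming <code>request</code> is valid and the value lies
+     * within ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE):</p>
+     * <pre><code>int32_t boost = 200; // doubles YUV/JPEG sensitivity relative to RAW
+     * ACaptureRequest_setEntry_i32(request,
+     *         ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST, 1, &amp;boost);
+     * </code></pre>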
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST =                // int32
+            ACAMERA_CONTROL_START + 40,
+    ACAMERA_CONTROL_END,
+
+    /**
+     * <p>Operation mode for edge
+     * enhancement.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Edge enhancement improves sharpness and details in the captured image. OFF means
+     * no enhancement will be applied by the camera device.</p>
+     * <p>FAST/HIGH_QUALITY both mean camera device determined enhancement
+     * will be applied. HIGH_QUALITY mode indicates that the
+     * camera device will use the highest-quality enhancement algorithms,
+     * even if it slows down capture rate. FAST means the camera device will
+     * not slow down capture rate when applying edge enhancement. FAST may be the same as OFF if
+     * edge enhancement will slow down capture rate. Every output stream will have a similar
+     * amount of enhancement applied.</p>
+     * <p>ZERO_SHUTTER_LAG is meant to be used by applications that maintain a continuous circular
+     * buffer of high-resolution images during preview and reprocess image(s) from that buffer
+     * into a final capture when triggered by the user. In this mode, the camera device applies
+     * edge enhancement to low-resolution streams (below maximum recording resolution) to
+     * maximize preview quality, but does not apply edge enhancement to high-resolution streams,
+     * since those will be reprocessed later if necessary.</p>
+     * <p>For YUV_REPROCESSING, these FAST/HIGH_QUALITY modes both mean that the camera
+     * device will apply FAST/HIGH_QUALITY YUV-domain edge enhancement, respectively.
+     * The camera device may adjust its internal edge enhancement parameters for best
+     * image quality based on the android.reprocess.effectiveExposureFactor, if it is set.</p>
+     */
+    ACAMERA_EDGE_MODE =                                         // byte (enum)
+            ACAMERA_EDGE_START,
+    /**
+     * <p>List of edge enhancement modes for ACAMERA_EDGE_MODE that are supported by this camera
+     * device.</p>
+     *
+     * @see ACAMERA_EDGE_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Full-capability camera devices must always support OFF; camera devices that support
+     * YUV_REPROCESSING or PRIVATE_REPROCESSING will list ZERO_SHUTTER_LAG; all devices will
+     * list FAST.</p>
+     */
+    ACAMERA_EDGE_AVAILABLE_EDGE_MODES =                         // byte[n]
+            ACAMERA_EDGE_START + 2,
+    ACAMERA_EDGE_END,
+
+    /**
+     * <p>The desired mode for the camera device's flash control.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This control is only effective when a flash unit is available
+     * (<code>ACAMERA_FLASH_INFO_AVAILABLE == true</code>).</p>
+     * <p>When this control is used, the ACAMERA_CONTROL_AE_MODE must be set to ON or OFF.
+     * Otherwise, the camera device auto-exposure related flash control (ON_AUTO_FLASH,
+     * ON_ALWAYS_FLASH, or ON_AUTO_FLASH_REDEYE) will override this control.</p>
+     * <p>When set to OFF, the camera device will not fire flash for this capture.</p>
+     * <p>When set to SINGLE, the camera device will fire flash regardless of the camera
+     * device's auto-exposure routine's result. When used in the still capture case, this
+     * control should be used along with the auto-exposure (AE) precapture metering sequence
+     * (ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER); otherwise, the image may be incorrectly exposed.</p>
+     * <p>When set to TORCH, the flash will be on continuously. This mode can be used
+     * for use cases such as preview, auto-focus assist, still capture, or video recording.</p>
+     * <p>The flash status will be reported by ACAMERA_FLASH_STATE in the capture result metadata.</p>
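+     *
+     * <p>For example (a sketch; <code>chars</code> and <code>request</code> are assumed
+     * valid), torch mode can be enabled only when a flash unit exists:</p>
+     * <pre><code>ACameraMetadata_const_entry flashInfo;
+     * if (ACameraMetadata_getConstEntry(chars, ACAMERA_FLASH_INFO_AVAILABLE,
+     *         &amp;flashInfo) == ACAMERA_OK &amp;&amp; flashInfo.data.u8[0]) {
+     *     uint8_t aeMode = ACAMERA_CONTROL_AE_MODE_ON;
+     *     uint8_t flashMode = ACAMERA_FLASH_MODE_TORCH;
+     *     ACaptureRequest_setEntry_u8(request, ACAMERA_CONTROL_AE_MODE, 1, &amp;aeMode);
+     *     ACaptureRequest_setEntry_u8(request, ACAMERA_FLASH_MODE, 1, &amp;flashMode);
+     * }
+     * </code></pre>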
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * @see ACAMERA_FLASH_INFO_AVAILABLE
+     * @see ACAMERA_FLASH_STATE
+     */
+    ACAMERA_FLASH_MODE =                                        // byte (enum)
+            ACAMERA_FLASH_START + 2,
+    /**
+     * <p>Current state of the flash
+     * unit.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>When the camera device doesn't have a flash unit
+     * (i.e. <code>ACAMERA_FLASH_INFO_AVAILABLE == false</code>), this state will always be UNAVAILABLE.
+     * Other states indicate the current flash status.</p>
+     * <p>In certain conditions, this will be available on LEGACY devices:</p>
+     * <ul>
+     * <li>Flash-less cameras always return UNAVAILABLE.</li>
+     * <li>Using ACAMERA_CONTROL_AE_MODE <code>==</code> ON_ALWAYS_FLASH
+     *    will always return FIRED.</li>
+     * <li>Using ACAMERA_FLASH_MODE <code>==</code> TORCH
+     *    will always return FIRED.</li>
+     * </ul>
+     * <p>In all other conditions the state will not be available on
+     * LEGACY devices (i.e. it will be <code>null</code>).</p>
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_FLASH_INFO_AVAILABLE
+     * @see ACAMERA_FLASH_MODE
+     */
+    ACAMERA_FLASH_STATE =                                       // byte (enum)
+            ACAMERA_FLASH_START + 5,
+    ACAMERA_FLASH_END,
+
+    /**
+     * <p>Whether this camera device has a
+     * flash unit.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Will be <code>false</code> if no flash is available.</p>
+     * <p>If there is no flash unit, none of the flash controls do
+     * anything.</p>
+     */
+    ACAMERA_FLASH_INFO_AVAILABLE =                              // byte (enum)
+            ACAMERA_FLASH_INFO_START,
+    ACAMERA_FLASH_INFO_END,
+
+    /**
+     * <p>Operational mode for hot pixel correction.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Hot pixel correction interpolates out, or otherwise removes, pixels
+     * that do not accurately measure the incoming light (i.e. pixels that
+     * are stuck at an arbitrary value or are oversensitive).</p>
+     */
+    ACAMERA_HOT_PIXEL_MODE =                                    // byte (enum)
+            ACAMERA_HOT_PIXEL_START,
+    /**
+     * <p>List of hot pixel correction modes for ACAMERA_HOT_PIXEL_MODE that are supported by this
+     * camera device.</p>
+     *
+     * @see ACAMERA_HOT_PIXEL_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>FULL mode camera devices will always support FAST.</p>
+     */
+    ACAMERA_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES =               // byte[n]
+            ACAMERA_HOT_PIXEL_START + 1,
+    ACAMERA_HOT_PIXEL_END,
+
+    /**
+     * <p>GPS coordinates to include in output JPEG
+     * EXIF.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>None</p>
+     */
+    ACAMERA_JPEG_GPS_COORDINATES =                              // double[3]
+            ACAMERA_JPEG_START,
+    /**
+     * <p>32 characters describing GPS algorithm to
+     * include in EXIF.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>None</p>
+     */
+    ACAMERA_JPEG_GPS_PROCESSING_METHOD =                        // byte
+            ACAMERA_JPEG_START + 1,
+    /**
+     * <p>Time GPS fix was made to include in
+     * EXIF.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>None</p>
+     */
+    ACAMERA_JPEG_GPS_TIMESTAMP =                                // int64
+            ACAMERA_JPEG_START + 2,
+    /**
+     * <p>The orientation for a JPEG image.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>The clockwise rotation angle in degrees, relative to the orientation
+     * to the camera, that the JPEG picture needs to be rotated by, to be viewed
+     * upright.</p>
+     * <p>Camera devices may either encode this value into the JPEG EXIF header, or
+     * rotate the image data to match this orientation. When the image data is rotated,
+     * the thumbnail data will also be rotated.</p>
+     * <p>Note that this orientation is relative to the orientation of the camera sensor, given
+     * by ACAMERA_SENSOR_ORIENTATION.</p>
+     * <p>To translate from the device orientation given by the Android sensor APIs, the following
+     * sample code may be used:</p>
+     * <pre><code>private int getJpegOrientation(CameraCharacteristics c, int deviceOrientation) {
+     *     if (deviceOrientation == android.view.OrientationEventListener.ORIENTATION_UNKNOWN) return 0;
+     *     int sensorOrientation = c.get(CameraCharacteristics.SENSOR_ORIENTATION);
+     *
+     *     // Round device orientation to a multiple of 90
+     *     deviceOrientation = (deviceOrientation + 45) / 90 * 90;
+     *
+     *     // Reverse device orientation for front-facing cameras
+     *     boolean facingFront = c.get(CameraCharacteristics.LENS_FACING) == CameraCharacteristics.LENS_FACING_FRONT;
+     *     if (facingFront) deviceOrientation = -deviceOrientation;
+     *
+     *     // Calculate desired JPEG orientation relative to camera orientation to make
+     *     // the image upright relative to the device orientation
+     *     int jpegOrientation = (sensorOrientation + deviceOrientation + 360) % 360;
+     *
+     *     return jpegOrientation;
+     * }
+     * </code></pre>
+     *
+     * @see ACAMERA_SENSOR_ORIENTATION
+     */
+    ACAMERA_JPEG_ORIENTATION =                                  // int32
+            ACAMERA_JPEG_START + 3,
+    /**
+     * <p>Compression quality of the final JPEG
+     * image.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>85-95 is typical usage range.</p>
+     */
+    ACAMERA_JPEG_QUALITY =                                      // byte
+            ACAMERA_JPEG_START + 4,
+    /**
+     * <p>Compression quality of JPEG
+     * thumbnail.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>None</p>
+     */
+    ACAMERA_JPEG_THUMBNAIL_QUALITY =                            // byte
+            ACAMERA_JPEG_START + 5,
+    /**
+     * <p>Resolution of embedded JPEG thumbnail.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>When set to (0, 0), the JPEG EXIF will not contain a thumbnail,
+     * but the captured JPEG will still be a valid image.</p>
+     * <p>For best results, when issuing a request for a JPEG image, the thumbnail size selected
+     * should have the same aspect ratio as the main JPEG output.</p>
+     * <p>If the thumbnail image aspect ratio differs from the JPEG primary image aspect
+     * ratio, the camera device creates the thumbnail by cropping it from the primary image.
+     * For example, if the primary image has a 4:3 aspect ratio and the thumbnail image has a
+     * 16:9 aspect ratio, the primary image will be cropped vertically (letterboxed) to
+     * generate the thumbnail image. The thumbnail image will always have a smaller Field
+     * Of View (FOV) than the primary image when aspect ratios differ.</p>
+     * <p>When an ACAMERA_JPEG_ORIENTATION of non-zero degree is requested,
+     * the camera device will handle thumbnail rotation in one of the following ways:</p>
+     * <ul>
+     * <li>Set the
+     *   <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>
+     *   and keep jpeg and thumbnail image data unrotated.</li>
+     * <li>Rotate the jpeg and thumbnail image data and not set
+     *   <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>.
+     *   In this case, LIMITED or FULL hardware level devices will report the rotated thumbnail size
+     *   in the capture result, so the width and height will be interchanged if a 90 or 270 degree
+     *   orientation is requested. LEGACY devices will always report the unrotated thumbnail size.</li>
+     * </ul>
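+     *
+     * <p>A minimal sketch (assuming <code>request</code> is valid); any non-zero size
+     * should be taken from ACAMERA_JPEG_AVAILABLE_THUMBNAIL_SIZES:</p>
+     * <pre><code>int32_t thumbSize[2] = {0, 0}; // suppress the EXIF thumbnail
+     * ACaptureRequest_setEntry_i32(request, ACAMERA_JPEG_THUMBNAIL_SIZE, 2, thumbSize);
+     * </code></pre>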
+     *
+     * @see ACAMERA_JPEG_ORIENTATION
+     */
+    ACAMERA_JPEG_THUMBNAIL_SIZE =                               // int32[2]
+            ACAMERA_JPEG_START + 6,
+    /**
+     * <p>List of JPEG thumbnail sizes for ACAMERA_JPEG_THUMBNAIL_SIZE supported by this
+     * camera device.</p>
+     *
+     * @see ACAMERA_JPEG_THUMBNAIL_SIZE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This list will include at least one non-zero resolution, plus <code>(0,0)</code> for indicating no
+     * thumbnail should be generated.</p>
+     * <p>The following conditions will be satisfied for this size list:</p>
+     * <ul>
+     * <li>The sizes will be sorted by increasing pixel area (width x height).
+     * If several resolutions have the same area, they will be sorted by increasing width.</li>
+     * <li>The aspect ratio of the largest thumbnail size will be the same as the
+     * aspect ratio of the largest JPEG output size in ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS.
+     * The largest size is defined as the size that has the largest pixel area
+     * in a given size list.</li>
+     * <li>Each output JPEG size in ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS will have at least
+     * one corresponding size that has the same aspect ratio in availableThumbnailSizes,
+     * and vice versa.</li>
+     * <li>All non-<code>(0, 0)</code> sizes will have non-zero widths and heights.</li>
+     * </ul>
+     *
+     * @see ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
+     */
+    ACAMERA_JPEG_AVAILABLE_THUMBNAIL_SIZES =                    // int32[2*n]
+            ACAMERA_JPEG_START + 7,
+    ACAMERA_JPEG_END,
+
+    /**
+     * <p>The desired lens aperture size, as a ratio of lens focal length to the
+     * effective aperture diameter.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Setting this value is only supported on the camera devices that have a variable
+     * aperture lens.</p>
+     * <p>When this is supported and ACAMERA_CONTROL_AE_MODE is OFF,
+     * this can be set along with ACAMERA_SENSOR_EXPOSURE_TIME,
+     * ACAMERA_SENSOR_SENSITIVITY, and ACAMERA_SENSOR_FRAME_DURATION
+     * to achieve manual exposure control.</p>
+     * <p>The requested aperture value may take several frames to reach the
+     * requested value; the camera device will report the current (intermediate)
+     * aperture size in capture result metadata while the aperture is changing.
+     * While the aperture is still changing, ACAMERA_LENS_STATE will be set to MOVING.</p>
+     * <p>When this is supported and ACAMERA_CONTROL_AE_MODE is one of
+     * the ON modes, this will be overridden by the camera device
+     * auto-exposure algorithm; the overridden values are then provided
+     * back to the user in the corresponding result.</p>
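+     *
+     * <p>A manual-exposure sketch (values are placeholders; the aperture must be one of
+     * ACAMERA_LENS_INFO_AVAILABLE_APERTURES, and <code>request</code> is assumed valid):</p>
+     * <pre><code>uint8_t aeMode = ACAMERA_CONTROL_AE_MODE_OFF;
+     * float aperture = 2.0f;
+     * int64_t exposureNs = 10000000; // 10 ms
+     * int32_t sensitivity = 200;
+     * ACaptureRequest_setEntry_u8(request, ACAMERA_CONTROL_AE_MODE, 1, &amp;aeMode);
+     * ACaptureRequest_setEntry_float(request, ACAMERA_LENS_APERTURE, 1, &amp;aperture);
+     * ACaptureRequest_setEntry_i64(request, ACAMERA_SENSOR_EXPOSURE_TIME, 1, &amp;exposureNs);
+     * ACaptureRequest_setEntry_i32(request, ACAMERA_SENSOR_SENSITIVITY, 1, &amp;sensitivity);
+     * </code></pre>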
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_LENS_STATE
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_LENS_APERTURE =                                     // float
+            ACAMERA_LENS_START,
+    /**
+     * <p>The desired setting for the lens neutral density filter(s).</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This control will not be supported on most camera devices.</p>
+     * <p>Lens filters are typically used to lower the amount of light the
+     * sensor is exposed to (measured in steps of EV). As used here, an EV
+     * step is the standard logarithmic representation, which is
+     * non-negative and inversely proportional to the amount of light
+     * hitting the sensor.  For example, setting this to 0 would result
+     * in no reduction of the incoming light, and setting this to 2 would
+     * mean that the filter is set to reduce incoming light by two stops
+     * (allowing 1/4 of the prior amount of light to the sensor).</p>
+     * <p>It may take several frames before the lens filter density changes
+     * to the requested value. While the filter density is still changing,
+     * ACAMERA_LENS_STATE will be set to MOVING.</p>
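+     * <p>As a sketch of the arithmetic above (the function name is hypothetical),
+     * a density of D EV steps transmits 2^-D of the incoming light:</p>
+     * <pre><code>#include &lt;math.h&gt;
+     * // Fraction of incoming light passed by an ND filter of densityEv steps;
+     * // e.g. densityEv = 2 passes 1/4 of the light, matching the example above.
+     * float filterTransmission(float densityEv) {
+     *     return powf(2.0f, -densityEv);
+     * }
+     * </code></pre>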
+     *
+     * @see ACAMERA_LENS_STATE
+     */
+    ACAMERA_LENS_FILTER_DENSITY =                               // float
+            ACAMERA_LENS_START + 1,
+    /**
+     * <p>The desired lens focal length; used for optical zoom.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This setting controls the physical focal length of the camera
+     * device's lens. Changing the focal length changes the field of
+     * view of the camera device, and is usually used for optical zoom.</p>
+     * <p>Like ACAMERA_LENS_FOCUS_DISTANCE and ACAMERA_LENS_APERTURE, this
+     * setting won't be applied instantaneously, and it may take several
+     * frames before the lens can change to the requested focal length.
+     * While the focal length is still changing, ACAMERA_LENS_STATE will
+     * be set to MOVING.</p>
+     * <p>Optical zoom will not be supported on most devices.</p>
+     *
+     * @see ACAMERA_LENS_APERTURE
+     * @see ACAMERA_LENS_FOCUS_DISTANCE
+     * @see ACAMERA_LENS_STATE
+     */
+    ACAMERA_LENS_FOCAL_LENGTH =                                 // float
+            ACAMERA_LENS_START + 2,
+    /**
+     * <p>Desired distance to plane of sharpest focus,
+     * measured from frontmost surface of the lens.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Should be zero for fixed-focus cameras.</p>
+     */
+    ACAMERA_LENS_FOCUS_DISTANCE =                               // float
+            ACAMERA_LENS_START + 3,
+    /**
+     * <p>Sets whether the camera device uses optical image stabilization (OIS)
+     * when capturing images.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>OIS is used to compensate for motion blur due to small
+     * movements of the camera during capture. Unlike digital image
+     * stabilization (ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE), OIS
+     * makes use of mechanical elements to stabilize the camera
+     * sensor, and thus allows for longer exposure times before
+     * camera shake becomes apparent.</p>
+     * <p>Switching between different optical stabilization modes may take several
+     * frames to initialize; the camera device will report the current mode in
+     * capture result metadata. For example, when "ON" mode is requested, the
+     * optical stabilization mode in the first several capture results may still
+     * be "OFF", and it will become "ON" when the initialization is done.</p>
+     * <p>If a camera device supports both OIS and digital image stabilization
+     * (ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE), turning both modes on may produce undesirable
+     * interaction, so it is recommended not to enable both at the same time.</p>
+     * <p>Not all devices will support OIS; see
+     * ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION for
+     * available controls.</p>
+     *
+     * @see ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
+     * @see ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION
+     */
+    ACAMERA_LENS_OPTICAL_STABILIZATION_MODE =                   // byte (enum)
+            ACAMERA_LENS_START + 4,
+    /**
+     * <p>Direction the camera faces relative to
+     * device screen.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>None</p>
+     */
+    ACAMERA_LENS_FACING =                                       // byte (enum)
+            ACAMERA_LENS_START + 5,
+    /**
+     * <p>The orientation of the camera relative to the sensor
+     * coordinate system.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>The four coefficients that describe the quaternion
+     * rotation from the Android sensor coordinate system to a
+     * camera-aligned coordinate system where the X-axis is
+     * aligned with the long side of the image sensor, the Y-axis
+     * is aligned with the short side of the image sensor, and
+     * the Z-axis is aligned with the optical axis of the sensor.</p>
+     * <p>To convert from the quaternion coefficients <code>(x,y,z,w)</code>
+     * to the axis of rotation <code>(a_x, a_y, a_z)</code> and rotation
+     * amount <code>theta</code>, the following formulas can be used:</p>
+     * <pre><code> theta = 2 * acos(w)
+     * a_x = x / sin(theta/2)
+     * a_y = y / sin(theta/2)
+     * a_z = z / sin(theta/2)
+     * </code></pre>
+     * <p>To create a 3x3 rotation matrix that applies the rotation
+     * defined by this quaternion, the following matrix can be
+     * used:</p>
+     * <pre><code>R = [ 1 - 2y^2 - 2z^2,       2xy - 2zw,       2xz + 2yw,
+     *            2xy + 2zw, 1 - 2x^2 - 2z^2,       2yz - 2xw,
+     *            2xz - 2yw,       2yz + 2xw, 1 - 2x^2 - 2y^2 ]
+     * </code></pre>
+     * <p>This matrix can then be used to apply the rotation to a
+     *  column vector point with</p>
+     * <p><code>p' = Rp</code></p>
+     * <p>where <code>p</code> is in the device sensor coordinate system, and
+     *  <code>p'</code> is in the camera-oriented coordinate system.</p>
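+     * <p>For illustration only, a minimal C sketch of the axis-angle conversion
+     * above (assuming a normalized quaternion with <code>sin(theta/2) != 0</code>;
+     * the function name is hypothetical):</p>
+     * <pre><code>#include &lt;math.h&gt;
+     * // q = (x, y, z, w) as stored in this tag; outputs rotation axis and angle.
+     * void quaternionToAxisAngle(const float q[4], float axis[3], float* theta) {
+     *     *theta = 2.0f * acosf(q[3]);       // theta = 2 * acos(w)
+     *     float s = sinf(*theta / 2.0f);     // sin(theta/2)
+     *     axis[0] = q[0] / s;
+     *     axis[1] = q[1] / s;
+     *     axis[2] = q[2] / s;
+     * }
+     * </code></pre>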
+     */
+    ACAMERA_LENS_POSE_ROTATION =                                // float[4]
+            ACAMERA_LENS_START + 6,
+    /**
+     * <p>Position of the camera optical center.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>The position of the camera device's lens optical center,
+     * as a three-dimensional vector <code>(x,y,z)</code>, relative to the
+     * optical center of the largest camera device facing in the
+     * same direction as this camera, in the
+     * <a href="https://developer.android.com/reference/android/hardware/SensorEvent.html">Android sensor coordinate axes</a>.
+     * Note that only the axis definitions are shared with
+     * the sensor coordinate system, but not the origin.</p>
+     * <p>If this device is the largest or only camera device with a
+     * given facing, then this position will be <code>(0, 0, 0)</code>; a
+     * camera device with a lens optical center located 3 cm from
+     * the main sensor along the +X axis (to the right from the
+     * user's perspective) will report <code>(0.03, 0, 0)</code>.</p>
+     * <p>To transform pixel coordinates between two cameras
+     * facing the same direction, first the source camera
+     * ACAMERA_LENS_RADIAL_DISTORTION must be corrected for.  Then
+     * the source camera ACAMERA_LENS_INTRINSIC_CALIBRATION needs
+     * to be applied, followed by the ACAMERA_LENS_POSE_ROTATION
+     * of the source camera, the translation of the source camera
+     * relative to the destination camera, the
+     * ACAMERA_LENS_POSE_ROTATION of the destination camera, and
+     * finally the inverse of ACAMERA_LENS_INTRINSIC_CALIBRATION
+     * of the destination camera. This obtains a
+     * radial-distortion-free coordinate in the destination
+     * camera pixel coordinates.</p>
+     * <p>To compare this against a real image from the destination
+     * camera, the destination camera image then needs to be
+     * corrected for radial distortion before comparison or
+     * sampling.</p>
+     *
+     * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
+     * @see ACAMERA_LENS_POSE_ROTATION
+     * @see ACAMERA_LENS_RADIAL_DISTORTION
+     */
+    ACAMERA_LENS_POSE_TRANSLATION =                             // float[3]
+            ACAMERA_LENS_START + 7,
+    /**
+     * <p>The range of scene distances that are in
+     * sharp focus (depth of field).</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>If variable focus is not supported, the camera device can still
+     * report a fixed depth of field range.</p>
+     */
+    ACAMERA_LENS_FOCUS_RANGE =                                  // float[2]
+            ACAMERA_LENS_START + 8,
+    /**
+     * <p>Current lens status.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>For lens parameters ACAMERA_LENS_FOCAL_LENGTH, ACAMERA_LENS_FOCUS_DISTANCE,
+     * ACAMERA_LENS_FILTER_DENSITY and ACAMERA_LENS_APERTURE, when changes are requested,
+     * they may take several frames to reach the requested values. This state indicates
+     * the current status of the lens parameters.</p>
+     * <p>When the state is STATIONARY, the lens parameters are not changing. This could be
+     * either because the parameters are all fixed, or because the lens has had enough
+     * time to reach the most recently-requested values.
+     * If none of these lens parameters are changeable for a camera device, as listed below:</p>
+     * <ul>
+     * <li>Fixed focus (<code>ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE == 0</code>), which means
+     * the ACAMERA_LENS_FOCUS_DISTANCE parameter will always be 0.</li>
+     * <li>Fixed focal length (ACAMERA_LENS_INFO_AVAILABLE_FOCAL_LENGTHS contains a single value),
+     * which means optical zoom is not supported.</li>
+     * <li>No ND filter (ACAMERA_LENS_INFO_AVAILABLE_FILTER_DENSITIES contains only 0).</li>
+     * <li>Fixed aperture (ACAMERA_LENS_INFO_AVAILABLE_APERTURES contains a single value).</li>
+     * </ul>
+     * <p>Then this state will always be STATIONARY.</p>
+     * <p>When the state is MOVING, it indicates that at least one of the lens parameters
+     * is changing.</p>
+     *
+     * @see ACAMERA_LENS_APERTURE
+     * @see ACAMERA_LENS_FILTER_DENSITY
+     * @see ACAMERA_LENS_FOCAL_LENGTH
+     * @see ACAMERA_LENS_FOCUS_DISTANCE
+     * @see ACAMERA_LENS_INFO_AVAILABLE_APERTURES
+     * @see ACAMERA_LENS_INFO_AVAILABLE_FILTER_DENSITIES
+     * @see ACAMERA_LENS_INFO_AVAILABLE_FOCAL_LENGTHS
+     * @see ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE
+     */
+    ACAMERA_LENS_STATE =                                        // byte (enum)
+            ACAMERA_LENS_START + 9,
+    /**
+     * <p>The parameters for this camera device's intrinsic
+     * calibration.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>The five calibration parameters that describe the
+     * transform from camera-centric 3D coordinates to sensor
+     * pixel coordinates:</p>
+     * <pre><code>[f_x, f_y, c_x, c_y, s]
+     * </code></pre>
+     * <p>Where <code>f_x</code> and <code>f_y</code> are the horizontal and vertical
+     * focal lengths, <code>[c_x, c_y]</code> is the position of the optical
+     * axis, and <code>s</code> is a skew parameter for the sensor plane not
+     * being aligned with the lens plane.</p>
+     * <p>These are typically used within a transformation matrix K:</p>
+     * <pre><code>K = [ f_x,   s, c_x,
+     *        0, f_y, c_y,
+     *        0,   0,   1 ]
+     * </code></pre>
+     * <p>which can then be combined with the camera pose rotation
+     * <code>R</code> and translation <code>t</code> (ACAMERA_LENS_POSE_ROTATION and
+     * ACAMERA_LENS_POSE_TRANSLATION, respectively) to calculate the
+     * complete transform from world coordinates to pixel
+     * coordinates:</p>
+     * <pre><code>P = [ K 0   * [ R t
+     *      0 1 ]     0 1 ]
+     * </code></pre>
+     * <p>and with <code>p_w</code> being a point in the world coordinate system
+     * and <code>p_s</code> being a point in the camera active pixel array
+     * coordinate system, and with the mapping including the
+     * homogeneous division by z:</p>
+     * <pre><code> p_h = (x_h, y_h, z_h) = P p_w
+     * p_s = p_h / z_h
+     * </code></pre>
+     * <p>so <code>[x_s, y_s]</code> are the pixel coordinates of the world
+     * point, <code>z_s = 1</code>, and <code>w_s</code> is a measurement of disparity
+     * (depth) in pixel coordinates.</p>
+     * <p>Note that the coordinate system for this transform is the
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE system,
+     * where <code>(0,0)</code> is the top-left of the
+     * preCorrectionActiveArraySize rectangle. Once the pose and
+     * intrinsic calibration transforms have been applied to a
+     * world point, then the ACAMERA_LENS_RADIAL_DISTORTION
+     * transform needs to be applied, and the result adjusted to
+     * be in the ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE coordinate
+     * system (where <code>(0, 0)</code> is the top-left of the
+     * activeArraySize rectangle), to determine the final pixel
+     * coordinate of the world point for processed (non-RAW)
+     * output buffers.</p>
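+     * <p>For illustration only, a minimal C sketch applying <code>K</code> to a
+     * point already in camera-centric coordinates, including the homogeneous
+     * division by z (the function name is hypothetical):</p>
+     * <pre><code>// calib = [f_x, f_y, c_x, c_y, s] as stored in this tag.
+     * void projectToPixels(const float calib[5], const float pCam[3],
+     *                      float* xPx, float* yPx) {
+     *     float f_x = calib[0], f_y = calib[1];
+     *     float c_x = calib[2], c_y = calib[3], s = calib[4];
+     *     *xPx = (f_x * pCam[0] + s * pCam[1]) / pCam[2] + c_x;
+     *     *yPx = (f_y * pCam[1]) / pCam[2] + c_y;
+     * }
+     * </code></pre>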
+     *
+     * @see ACAMERA_LENS_POSE_ROTATION
+     * @see ACAMERA_LENS_POSE_TRANSLATION
+     * @see ACAMERA_LENS_RADIAL_DISTORTION
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_LENS_INTRINSIC_CALIBRATION =                        // float[5]
+            ACAMERA_LENS_START + 10,
+    /**
+     * <p>The correction coefficients to correct for this camera device's
+     * radial and tangential lens distortion.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Four radial distortion coefficients <code>[kappa_0, kappa_1, kappa_2,
+     * kappa_3]</code> and two tangential distortion coefficients
+     * <code>[kappa_4, kappa_5]</code> that can be used to correct the
+     * lens's geometric distortion with the mapping equations:</p>
+     * <pre><code> x_c = x_i * ( kappa_0 + kappa_1 * r^2 + kappa_2 * r^4 + kappa_3 * r^6 ) +
+     *        kappa_4 * (2 * x_i * y_i) + kappa_5 * ( r^2 + 2 * x_i^2 )
+     *  y_c = y_i * ( kappa_0 + kappa_1 * r^2 + kappa_2 * r^4 + kappa_3 * r^6 ) +
+     *        kappa_5 * (2 * x_i * y_i) + kappa_4 * ( r^2 + 2 * y_i^2 )
+     * </code></pre>
+     * <p>Here, <code>[x_c, y_c]</code> are the coordinates to sample in the
+     * input image that correspond to the pixel values in the
+     * corrected image at the coordinate <code>[x_i, y_i]</code>:</p>
+     * <pre><code> correctedImage(x_i, y_i) = sample_at(x_c, y_c, inputImage)
+     * </code></pre>
+     * <p>The pixel coordinates are defined in a normalized
+     * coordinate system related to the
+     * ACAMERA_LENS_INTRINSIC_CALIBRATION calibration fields.
+     * Both <code>[x_i, y_i]</code> and <code>[x_c, y_c]</code> have <code>(0,0)</code> at the
+     * lens optical center <code>[c_x, c_y]</code>. The maximum magnitudes
+     * of both x and y coordinates are normalized to be 1 at the
+     * edge farther from the optical center, so the range
+     * for both dimensions is <code>-1 &lt;= x &lt;= 1</code>.</p>
+     * <p>Finally, <code>r</code> represents the radial distance from the
+     * optical center, <code>r^2 = x_i^2 + y_i^2</code>, and its magnitude
+     * is therefore no larger than <code>|r| &lt;= sqrt(2)</code>.</p>
+     * <p>The distortion model used is the Brown-Conrady model.</p>
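+     * <p>For illustration only, a minimal C sketch of the mapping equations
+     * above, computing the source sample coordinate for one corrected pixel
+     * (the function name is hypothetical):</p>
+     * <pre><code>// kappa[0..3] are the radial, kappa[4..5] the tangential coefficients.
+     * void distortPoint(const float kappa[6], float x_i, float y_i,
+     *                   float* x_c, float* y_c) {
+     *     float r2 = x_i * x_i + y_i * y_i;   // r^2
+     *     float radial = kappa[0] + r2 * (kappa[1] + r2 * (kappa[2] + r2 * kappa[3]));
+     *     *x_c = x_i * radial + kappa[4] * (2 * x_i * y_i) + kappa[5] * (r2 + 2 * x_i * x_i);
+     *     *y_c = y_i * radial + kappa[5] * (2 * x_i * y_i) + kappa[4] * (r2 + 2 * y_i * y_i);
+     * }
+     * </code></pre>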
+     *
+     * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
+     */
+    ACAMERA_LENS_RADIAL_DISTORTION =                            // float[6]
+            ACAMERA_LENS_START + 11,
+    ACAMERA_LENS_END,
+
+    /**
+     * <p>List of aperture size values for ACAMERA_LENS_APERTURE that are
+     * supported by this camera device.</p>
+     *
+     * @see ACAMERA_LENS_APERTURE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If the camera device doesn't support a variable lens aperture,
+     * this list will contain only one value, which is the fixed aperture size.</p>
+     * <p>If the camera device supports a variable aperture, the aperture values
+     * in this list will be sorted in ascending order.</p>
+     */
+    ACAMERA_LENS_INFO_AVAILABLE_APERTURES =                     // float[n]
+            ACAMERA_LENS_INFO_START,
+    /**
+     * <p>List of neutral density filter values for
+     * ACAMERA_LENS_FILTER_DENSITY that are supported by this camera device.</p>
+     *
+     * @see ACAMERA_LENS_FILTER_DENSITY
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If a neutral density filter is not supported by this camera device,
+     * this list will contain only 0. Otherwise, this list will include every
+     * filter density supported by the camera device, in ascending order.</p>
+     */
+    ACAMERA_LENS_INFO_AVAILABLE_FILTER_DENSITIES =              // float[n]
+            ACAMERA_LENS_INFO_START + 1,
+    /**
+     * <p>List of focal lengths for ACAMERA_LENS_FOCAL_LENGTH that are supported by this camera
+     * device.</p>
+     *
+     * @see ACAMERA_LENS_FOCAL_LENGTH
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If optical zoom is not supported, this list will only contain
+     * a single value corresponding to the fixed focal length of the
+     * device. Otherwise, this list will include every focal length supported
+     * by the camera device, in ascending order.</p>
+     */
+    ACAMERA_LENS_INFO_AVAILABLE_FOCAL_LENGTHS =                 // float[n]
+            ACAMERA_LENS_INFO_START + 2,
+    /**
+     * <p>List of optical image stabilization (OIS) modes for
+     * ACAMERA_LENS_OPTICAL_STABILIZATION_MODE that are supported by this camera device.</p>
+     *
+     * @see ACAMERA_LENS_OPTICAL_STABILIZATION_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If OIS is not supported by a given camera device, this list will
+     * contain only OFF.</p>
+     */
+    ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION =         // byte[n]
+            ACAMERA_LENS_INFO_START + 3,
+    /**
+     * <p>Hyperfocal distance for this lens.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If the lens is not fixed focus, the camera device will report this
+     * field when ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION is APPROXIMATE or CALIBRATED.</p>
+     *
+     * @see ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
+     */
+    ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE =                     // float
+            ACAMERA_LENS_INFO_START + 4,
+    /**
+     * <p>Shortest distance from frontmost surface
+     * of the lens that can be brought into sharp focus.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If the lens is fixed-focus, this will be
+     * 0.</p>
+     */
+    ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE =                  // float
+            ACAMERA_LENS_INFO_START + 5,
+    /**
+     * <p>Dimensions of lens shading map.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>The map should be on the order of 30-40 rows and columns, and
+     * must be smaller than 64x64.</p>
+     */
+    ACAMERA_LENS_INFO_SHADING_MAP_SIZE =                        // int32[2]
+            ACAMERA_LENS_INFO_START + 6,
+    /**
+     * <p>The lens focus distance calibration quality.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>The lens focus distance calibration quality determines the reliability of
+     * focus related metadata entries, i.e. ACAMERA_LENS_FOCUS_DISTANCE,
+     * ACAMERA_LENS_FOCUS_RANGE, ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE, and
+     * ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE.</p>
+     * <p>APPROXIMATE and CALIBRATED devices report the focus metadata in
+     * units of diopters (1/meter), so <code>0.0f</code> represents focusing at infinity,
+     * and increasing positive numbers represent focusing closer and closer
+     * to the camera device. The focus distance control also uses diopters
+     * on these devices.</p>
+     * <p>UNCALIBRATED devices do not use units that are directly comparable
+     * to any real physical measurement, but <code>0.0f</code> still represents farthest
+     * focus, and ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE represents the
+     * nearest focus the device can achieve.</p>
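+     * <p>As a sketch of the diopter convention above (the function name is
+     * hypothetical), converting an APPROXIMATE/CALIBRATED focus distance
+     * to meters:</p>
+     * <pre><code>#include &lt;math.h&gt;
+     * // 0.0f diopters means focused at infinity; otherwise meters = 1/diopters.
+     * float diopterToMeters(float diopters) {
+     *     return (diopters &gt; 0.0f) ? 1.0f / diopters : INFINITY;
+     * }
+     * </code></pre>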
+     *
+     * @see ACAMERA_LENS_FOCUS_DISTANCE
+     * @see ACAMERA_LENS_FOCUS_RANGE
+     * @see ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE
+     * @see ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE
+     */
+    ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION =              // byte (enum)
+            ACAMERA_LENS_INFO_START + 7,
+    ACAMERA_LENS_INFO_END,
+
+    /**
+     * <p>Mode of operation for the noise reduction algorithm.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>The noise reduction algorithm attempts to improve image quality by removing
+     * excessive noise added by the capture process, especially in dark conditions.</p>
+     * <p>OFF means no noise reduction will be applied by the camera device, for both raw and
+     * YUV domain.</p>
+     * <p>MINIMAL means that only sensor raw domain basic noise reduction is enabled, to remove
+     * demosaicing or other processing artifacts. For YUV_REPROCESSING, MINIMAL is the same as OFF.
+     * This mode is optional and may not be supported by all devices. The application should check
+     * ACAMERA_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES before using it.</p>
+     * <p>FAST/HIGH_QUALITY both mean camera device determined noise filtering
+     * will be applied. HIGH_QUALITY mode indicates that the camera device
+     * will use the highest-quality noise filtering algorithms,
+     * even if it slows down capture rate. FAST means the camera device will not
+     * slow down capture rate when applying noise filtering. FAST may be the same as MINIMAL if
+     * MINIMAL is listed, or the same as OFF if any noise filtering will slow down capture rate.
+     * Every output stream will have a similar amount of enhancement applied.</p>
+     * <p>ZERO_SHUTTER_LAG is meant to be used by applications that maintain a continuous circular
+     * buffer of high-resolution images during preview and reprocess image(s) from that buffer
+     * into a final capture when triggered by the user. In this mode, the camera device applies
+     * noise reduction to low-resolution streams (below maximum recording resolution) to maximize
+     * preview quality, but does not apply noise reduction to high-resolution streams, since
+     * those will be reprocessed later if necessary.</p>
+     * <p>For YUV_REPROCESSING, these FAST/HIGH_QUALITY modes both mean that the camera device
+     * will apply FAST/HIGH_QUALITY YUV domain noise reduction, respectively. The camera device
+     * may adjust the noise reduction parameters for best image quality based on the
+     * android.reprocess.effectiveExposureFactor if it is set.</p>
+     *
+     * @see ACAMERA_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES
+     */
+    ACAMERA_NOISE_REDUCTION_MODE =                              // byte (enum)
+            ACAMERA_NOISE_REDUCTION_START,
+    /**
+     * <p>List of noise reduction modes for ACAMERA_NOISE_REDUCTION_MODE that are supported
+     * by this camera device.</p>
+     *
+     * @see ACAMERA_NOISE_REDUCTION_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Full-capability camera devices will always support OFF and FAST.</p>
+     * <p>Camera devices that support YUV_REPROCESSING or PRIVATE_REPROCESSING will support
+     * ZERO_SHUTTER_LAG.</p>
+     * <p>Legacy-capability camera devices will only support FAST mode.</p>
+     */
+    ACAMERA_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES =   // byte[n]
+            ACAMERA_NOISE_REDUCTION_START + 2,
+    ACAMERA_NOISE_REDUCTION_END,
+
+    /**
+     * <p>The maximum numbers of different types of output streams
+     * that can be configured and used simultaneously by a camera device.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This is a 3 element tuple that contains the max number of output simultaneous
+     * streams for raw sensor, processed (but not stalling), and processed (and stalling)
+     * formats respectively. For example, assuming that JPEG is typically a processed and
+     * stalling stream, if max raw sensor format output stream number is 1, max YUV streams
+     * number is 3, and max JPEG stream number is 2, then this tuple should be <code>(1, 3, 2)</code>.</p>
+     * <p>This lists the upper bound of the number of output streams supported by
+     * the camera device. Using more streams simultaneously may require more hardware and
+     * CPU resources that will consume more power. The image format for an output stream can
+     * be any supported format provided by ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS.
+     * The formats defined in ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS can be categorized
+     * into the 3 stream types below (a validation sketch follows the list):</p>
+     * <ul>
+     * <li>Processed (but stalling): any non-RAW format with a stallDurations &gt; 0.
+     *   Typically {@link AIMAGE_FORMAT_JPEG} format.</li>
+     * <li>Raw formats: {@link AIMAGE_FORMAT_RAW16}, {@link AIMAGE_FORMAT_RAW10}, or
+     *   {@link AIMAGE_FORMAT_RAW12}.</li>
+     * <li>Processed (but not-stalling): any non-RAW format without a stall duration.
+     *   Typically {@link AIMAGE_FORMAT_YUV_420_888}.</li>
+     * </ul>
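+     * <p>For illustration only, a minimal C sketch checking a proposed output
+     * stream set against this tuple (the function name is hypothetical):</p>
+     * <pre><code>#include &lt;stdbool.h&gt;
+     * #include &lt;stdint.h&gt;
+     * // maxCounts is this tag's value: (raw, processed non-stalling,
+     * // processed stalling), e.g. (1, 3, 2) in the example above.
+     * bool fitsOutputStreamLimits(const int32_t maxCounts[3], int32_t numRaw,
+     *                             int32_t numProcNonStalling, int32_t numProcStalling) {
+     *     if (numRaw &gt; maxCounts[0]) return false;
+     *     if (numProcNonStalling &gt; maxCounts[1]) return false;
+     *     if (numProcStalling &gt; maxCounts[2]) return false;
+     *     return true;
+     * }
+     * </code></pre>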
+     *
+     * @see ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
+     */
+    ACAMERA_REQUEST_MAX_NUM_OUTPUT_STREAMS =                    // int32[3]
+            ACAMERA_REQUEST_START + 6,
+    /**
+     * <p>Specifies the number of pipeline stages the frame went
+     * through from when it was exposed to when the final completed result
+     * was available to the framework.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Depending on what settings are used in the request, and
+     * what streams are configured, the data may undergo less processing,
+     * and some pipeline stages may be skipped.</p>
+     * <p>See ACAMERA_REQUEST_PIPELINE_MAX_DEPTH for more details.</p>
+     *
+     * @see ACAMERA_REQUEST_PIPELINE_MAX_DEPTH
+     */
+    ACAMERA_REQUEST_PIPELINE_DEPTH =                            // byte
+            ACAMERA_REQUEST_START + 9,
+    /**
+     * <p>Specifies the number of maximum pipeline stages a frame
+     * has to go through from when it's exposed to when it's available
+     * to the framework.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>A typical minimum value for this is 2 (one stage to expose,
+     * one stage to readout) from the sensor. The ISP then usually adds
+     * its own stages to do custom HW processing. Further stages may be
+     * added by SW processing.</p>
+     * <p>Depending on what settings are used (e.g. YUV, JPEG) and what
+     * processing is enabled (e.g. face detection), the actual pipeline
+     * depth (specified by ACAMERA_REQUEST_PIPELINE_DEPTH) may be less than
+     * the max pipeline depth.</p>
+     * <p>A pipeline depth of X stages is equivalent to a pipeline latency of
+     * X frame intervals.</p>
+     * <p>This value will normally be 8 or less; however, for high speed capture sessions,
+     * the max pipeline depth will be up to 8 x the size of the high speed capture request list.</p>
+     *
+     * @see ACAMERA_REQUEST_PIPELINE_DEPTH
+     */
+    ACAMERA_REQUEST_PIPELINE_MAX_DEPTH =                        // byte
+            ACAMERA_REQUEST_START + 10,
+    /**
+     * <p>Defines how many sub-components
+     * a result will be composed of.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>In order to combat the pipeline latency, partial results
+     * may be delivered to the application layer from the camera device as
+     * soon as they are available.</p>
+     * <p>Optional; defaults to 1. A value of 1 means that partial
+     * results are not supported, and only the final TotalCaptureResult will
+     * be produced by the camera device.</p>
+     * <p>A typical use case for this might be: after requesting an
+     * auto-focus (AF) lock the new AF state might be available 50%
+     * of the way through the pipeline.  The camera device could
+     * then immediately dispatch this state via a partial result to
+     * the application, and the rest of the metadata via later
+     * partial results.</p>
+     */
+    ACAMERA_REQUEST_PARTIAL_RESULT_COUNT =                      // int32
+            ACAMERA_REQUEST_START + 11,
+    /**
+     * <p>List of capabilities that this camera device
+     * advertises as fully supporting.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>A capability is a contract that the camera device makes in order
+     * to be able to satisfy one or more use cases.</p>
+     * <p>Listing a capability guarantees that the whole set of features
+     * required to support a common use case will all be available.</p>
+     * <p>Using a subset of the functionality provided by an unsupported
+     * capability may be possible on a specific camera device implementation;
+     * to do this, query each of ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS,
+     * ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS,
+     * ACAMERA_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS.</p>
+     * <p>The following capabilities are guaranteed to be available on
+     * ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL <code>==</code> FULL devices:</p>
+     * <ul>
+     * <li>MANUAL_SENSOR</li>
+     * <li>MANUAL_POST_PROCESSING</li>
+     * </ul>
+     * <p>Other capabilities may be available on either FULL or LIMITED
+     * devices, but the application should query this key to be sure.</p>
+     *
+     * @see ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL
+     * @see ACAMERA_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS
+     * @see ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS
+     * @see ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES =                    // byte[n] (enum)
+            ACAMERA_REQUEST_START + 12,
+    /**
+     * <p>A list of all keys that the camera device has available
+     * to use with {@link ACaptureRequest}.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Attempting to set a key into a CaptureRequest that is not
+     * listed here will result in an invalid request and will be rejected
+     * by the camera device.</p>
+     * <p>This field can be used to query the feature set of a camera device
+     * at a more granular level than capabilities. This is especially
+     * important for optional keys that are not listed under any capability
+     * in ACAMERA_REQUEST_AVAILABLE_CAPABILITIES.</p>
+     *
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     */
+    ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS =                    // int32[n]
+            ACAMERA_REQUEST_START + 13,
+    /**
+     * <p>A list of all keys that the camera device has available
+     * to query with {@link ACameraMetadata} from
+     * {@link ACameraCaptureSession_captureCallback_result}.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Attempting to get a key from a CaptureResult that is not
+     * listed here will always return a <code>null</code> value. Getting a key from
+     * a CaptureResult that is listed here will generally never return a <code>null</code>
+     * value.</p>
+     * <p>The following keys may return <code>null</code> unless they are enabled:</p>
+     * <ul>
+     * <li>ACAMERA_STATISTICS_LENS_SHADING_MAP (non-null iff ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE == ON)</li>
+     * </ul>
+     * <p>(Those sometimes-null keys will nevertheless be listed here
+     * if they are available.)</p>
+     * <p>This field can be used to query the feature set of a camera device
+     * at a more granular level than capabilities. This is especially
+     * important for optional keys that are not listed under any capability
+     * in ACAMERA_REQUEST_AVAILABLE_CAPABILITIES.</p>
+     *
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     * @see ACAMERA_STATISTICS_LENS_SHADING_MAP
+     * @see ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE
+     */
+    ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS =                     // int32[n]
+            ACAMERA_REQUEST_START + 14,
+    /**
+     * <p>A list of all keys that the camera device has available
+     * to query with {@link ACameraMetadata} from
+     * {@link ACameraManager_getCameraCharacteristics}.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This entry follows the same rules as
+     * ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS (except that it applies for
+     * CameraCharacteristics instead of CaptureResult). See above for more
+     * details.</p>
+     *
+     * @see ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS
+     */
+    ACAMERA_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS =            // int32[n]
+            ACAMERA_REQUEST_START + 15,
+    ACAMERA_REQUEST_END,
+
+    /**
+     * <p>The desired region of the sensor to read out for this capture.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>This control can be used to implement digital zoom.</p>
+     * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
+     * <p>The crop region coordinate system is based off
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with <code>(0, 0)</code> being the
+     * top-left corner of the sensor active array.</p>
+     * <p>Output streams use this rectangle to produce their output,
+     * cropping to a smaller region if necessary to maintain the
+     * stream's aspect ratio, then scaling the sensor input to
+     * match the output's configured resolution.</p>
+     * <p>The crop region is applied after the RAW to other color
+     * space (e.g. YUV) conversion. Since raw streams
+     * (e.g. RAW16) don't have the conversion stage, they are not
+     * croppable. The crop region will be ignored by raw streams.</p>
+     * <p>For non-raw streams, any additional per-stream cropping will
+     * be done to maximize the final pixel area of the stream.</p>
+     * <p>For example, if the crop region is set to a 4:3 aspect
+     * ratio, then 4:3 streams will use the exact crop
+     * region. 16:9 streams will further crop vertically
+     * (letterbox).</p>
+     * <p>Conversely, if the crop region is set to a 16:9 aspect ratio, then 4:3
+     * outputs will crop horizontally (pillarbox), and 16:9
+     * streams will match exactly. These additional crops will
+     * be centered within the crop region.</p>
+     * <p>The width and height of the crop region cannot
+     * be set to be smaller than
+     * <code>floor( activeArraySize.width / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code> and
+     * <code>floor( activeArraySize.height / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code>, respectively.</p>
+     * <p>The camera device may adjust the crop region to account
+     * for rounding and other hardware requirements; the final
+     * crop region used will be included in the output capture
+     * result.</p>
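+     * <p>For illustration only, a minimal C sketch computing a centered crop
+     * region for a digital zoom factor, honoring the minimum-size constraint
+     * above (the function name is hypothetical):</p>
+     * <pre><code>#include &lt;stdint.h&gt;
+     * // activeW/activeH come from ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE;
+     * // maxZoom comes from ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM.
+     * // crop receives (left, top, width, height).
+     * void centeredCropForZoom(int32_t activeW, int32_t activeH, float zoom,
+     *                          float maxZoom, int32_t crop[4]) {
+     *     if (zoom &gt; maxZoom) zoom = maxZoom;
+     *     if (zoom &lt; 1.0f) zoom = 1.0f;
+     *     crop[2] = (int32_t)(activeW / zoom);  // width
+     *     crop[3] = (int32_t)(activeH / zoom);  // height
+     *     crop[0] = (activeW - crop[2]) / 2;    // left
+     *     crop[1] = (activeH - crop[3]) / 2;    // top
+     * }
+     * </code></pre>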
+     *
+     * @see ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_SCALER_CROP_REGION =                                // int32[4]
+            ACAMERA_SCALER_START,
+    /**
+     * <p>The maximum ratio between both active area width
+     * and crop region width, and active area height and
+     * crop region height, for ACAMERA_SCALER_CROP_REGION.</p>
+     *
+     * @see ACAMERA_SCALER_CROP_REGION
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This represents the maximum amount of zooming possible by
+     * the camera device, or equivalently, the minimum cropping
+     * window size.</p>
+     * <p>Crop regions that have a width or height that is smaller
+     * than this ratio allows will be rounded up to the minimum
+     * allowed size by the camera device.</p>
+     */
+    ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM =                 // float
+            ACAMERA_SCALER_START + 4,
+    /**
+     * <p>The available stream configurations that this
+     * camera device supports
+     * (i.e. format, width, height, output/input stream).</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>The configurations are listed as <code>(format, width, height, input?)</code>
+     * tuples.</p>
+     * <p>For a given use case, the actual maximum supported resolution
+     * may be lower than what is listed here, depending on the destination
+     * Surface for the image data. For example, for recording video,
+     * the video encoder chosen may have a maximum size limit (e.g. 1080p)
+     * smaller than what the camera (e.g. maximum resolution is 3264x2448)
+     * can provide.</p>
+     * <p>Please reference the documentation for the image data destination to
+     * check if it limits the maximum size for image data.</p>
+     * <p>Not all output formats may be supported in a configuration with
+     * an input stream of a particular format. For more details, see
+     * android.scaler.availableInputOutputFormatsMap.</p>
+     * <p>The following table describes the minimum required output stream
+     * configurations based on the hardware level
+     * (ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL):</p>
+     * <p>Format         | Size                                         | Hardware Level | Notes
+     * :-------------:|:--------------------------------------------:|:--------------:|:--------------:
+     * JPEG           | ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE          | Any            |
+     * JPEG           | 1920x1080 (1080p)                            | Any            | if 1080p &lt;= activeArraySize
+     * JPEG           | 1280x720 (720p)                              | Any            | if 720p &lt;= activeArraySize
+     * JPEG           | 640x480 (480p)                               | Any            | if 480p &lt;= activeArraySize
+     * JPEG           | 320x240 (240p)                               | Any            | if 240p &lt;= activeArraySize
+     * YUV_420_888    | all output sizes available for JPEG          | FULL           |
+     * YUV_420_888    | all output sizes available for JPEG, up to the maximum video size | LIMITED        |
+     * IMPLEMENTATION_DEFINED | same as YUV_420_888                  | Any            |</p>
+     * <p>Refer to ACAMERA_REQUEST_AVAILABLE_CAPABILITIES for additional
+     * mandatory stream configurations on a per-capability basis.</p>
+     *
+     * @see ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS =            // int32[n*4] (enum)
+            ACAMERA_SCALER_START + 10,
+    /**
+     * <p>This lists the minimum frame duration for each
+     * format/size combination.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This should correspond to the frame duration when only that
+     * stream is active, with all processing (typically in android.*.mode)
+     * set to either OFF or FAST.</p>
+     * <p>When multiple streams are used in a request, the minimum frame
+     * duration will be max(individual stream min durations).</p>
+     * <p>The minimum frame duration of a stream (of a particular format, size)
+     * is the same regardless of whether the stream is input or output.</p>
+     * <p>See ACAMERA_SENSOR_FRAME_DURATION and
+     * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS for more details about
+     * calculating the max frame rate.</p>
+     *
+     * @see ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     */
+    ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS =              // int64[4*n]
+            ACAMERA_SCALER_START + 11,
+    /**
+     * <p>This lists the maximum stall duration for each
+     * output format/size combination.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>A stall duration is how much extra time would get added
+     * to the normal minimum frame duration for a repeating request
+     * that has streams with non-zero stall.</p>
+     * <p>For example, consider JPEG captures which have the following
+     * characteristics:</p>
+     * <ul>
+     * <li>JPEG streams act like processed YUV streams in requests for which
+     * they are not included; in requests in which they are directly
+     * referenced, they act as JPEG streams. This is because supporting a
+     * JPEG stream requires the underlying YUV data to always be ready for
+     * use by a JPEG encoder, but the encoder will only be used (and impact
+     * frame duration) on requests that actually reference a JPEG stream.</li>
+     * <li>The JPEG processor can run concurrently to the rest of the camera
+     * pipeline, but cannot process more than 1 capture at a time.</li>
+     * </ul>
+     * <p>In other words, using a repeating YUV request would result
+     * in a steady frame rate (let's say it's 30 FPS). If a single
+     * JPEG request is submitted periodically, the frame rate will stay
+     * at 30 FPS (as long as we wait for the previous JPEG to return each
+     * time). If we try to submit a repeating YUV + JPEG request, then
+     * the frame rate will drop from 30 FPS.</p>
+     * <p>In general, submitting a new request with a non-0 stall time
+     * stream will <em>not</em> cause a frame rate drop unless there are still
+     * outstanding buffers for that stream from previous requests.</p>
+     * <p>Submitting a repeating request with streams (call this <code>S</code>)
+     * is the same as setting the minimum frame duration to
+     * the normal minimum frame duration corresponding to <code>S</code>, plus
+     * the maximum stall duration for <code>S</code>.</p>
+     * <p>If interleaving requests with and without a stall duration,
+     * a request will stall by the maximum of the remaining times
+     * for each can-stall stream with outstanding buffers.</p>
+     * <p>This means that a stalling request will not have an exposure start
+     * until the stall has completed.</p>
+     * <p>This should correspond to the stall duration when only that stream is
+     * active, with all processing (typically in android.*.mode) set to FAST
+     * or OFF. Setting any of the processing modes to HIGH_QUALITY
+     * effectively results in an indeterminate stall duration for all
+     * streams in a request (the regular stall calculation rules are
+     * ignored).</p>
+     * <p>The following formats may always have a stall duration:</p>
+     * <ul>
+     * <li>{@link AIMAGE_FORMAT_JPEG}</li>
+     * <li>{@link AIMAGE_FORMAT_RAW16}</li>
+     * </ul>
+     * <p>The following formats will never have a stall duration:</p>
+     * <ul>
+     * <li>{@link AIMAGE_FORMAT_YUV_420_888}</li>
+     * <li>{@link AIMAGE_FORMAT_RAW10}</li>
+     * </ul>
+     * <p>All other formats may or may not have an allowed stall duration on
+     * a per-capability basis; refer to ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     * for more details.</p>
+     * <p>See ACAMERA_SENSOR_FRAME_DURATION for more information about
+     * calculating the max frame rate (absent stalls).</p>
+     *
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     */
+    ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS =                  // int64[4*n]
+            ACAMERA_SCALER_START + 12,
+    /**
+     * <p>The crop type that this camera device supports.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>When passing a non-centered crop region (ACAMERA_SCALER_CROP_REGION) to a camera
+     * device that only supports CENTER_ONLY cropping, the camera device will move the
+     * crop region to the center of the sensor active array (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE)
+     * and keep the crop region width and height unchanged. The camera device will return the
+     * final used crop region in metadata result ACAMERA_SCALER_CROP_REGION.</p>
+     * <p>Camera devices that support FREEFORM cropping will support any crop region that
+     * is inside of the active array. The camera device will apply the same crop region and
+     * return the final used crop region in capture result metadata ACAMERA_SCALER_CROP_REGION.</p>
+     * <p>LEGACY capability devices will only support CENTER_ONLY cropping.</p>
+     *
+     * @see ACAMERA_SCALER_CROP_REGION
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_SCALER_CROPPING_TYPE =                              // byte (enum)
+            ACAMERA_SCALER_START + 13,
+    ACAMERA_SCALER_END,
+
+    /**
+     * <p>Duration each pixel is exposed to
+     * light.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>If the sensor can't expose this exact duration, it will shorten the
+     * duration exposed to the nearest possible value (rather than expose longer).
+     * The final exposure time used will be available in the output capture result.</p>
+     * <p>This control is only effective if ACAMERA_CONTROL_AE_MODE or ACAMERA_CONTROL_MODE is set to
+     * OFF; otherwise the auto-exposure algorithm will override this value.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_MODE
+     */
+    ACAMERA_SENSOR_EXPOSURE_TIME =                              // int64
+            ACAMERA_SENSOR_START,
+    /**
+     * <p>Duration from start of frame exposure to
+     * start of next frame exposure.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>The maximum frame rate that can be supported by a camera subsystem is
+     * a function of many factors:</p>
+     * <ul>
+     * <li>Requested resolutions of output image streams</li>
+     * <li>Availability of binning / skipping modes on the imager</li>
+     * <li>The bandwidth of the imager interface</li>
+     * <li>The bandwidth of the various ISP processing blocks</li>
+     * </ul>
+     * <p>Since these factors can vary greatly between different ISPs and
+     * sensors, the camera abstraction tries to represent the bandwidth
+     * restrictions with as simple a model as possible.</p>
+     * <p>The model presented has the following characteristics:</p>
+     * <ul>
+     * <li>The image sensor is always configured to output the smallest
+     * resolution possible given the application's requested output stream
+     * sizes.  The smallest resolution is defined as being at least as large
+     * as the largest requested output stream size; the camera pipeline must
+     * never digitally upsample sensor data when the crop region covers the
+     * whole sensor. In general, this means that if only small output stream
+     * resolutions are configured, the sensor can provide a higher frame
+     * rate.</li>
+     * <li>Since any request may use any or all the currently configured
+     * output streams, the sensor and ISP must be configured to support
+     * scaling a single capture to all the streams at the same time.  This
+     * means the camera pipeline must be ready to produce the largest
+     * requested output size without any delay.  Therefore, the overall
+     * frame rate of a given configured stream set is governed only by the
+     * largest requested stream resolution.</li>
+     * <li>Using more than one output stream in a request does not affect the
+     * frame duration.</li>
+     * <li>Certain format-streams may need to do additional background processing
+     * before data is consumed/produced by that stream. These processors
+     * can run concurrently to the rest of the camera pipeline, but
+     * cannot process more than 1 capture at a time.</li>
+     * </ul>
+     * <p>The necessary information for the application, given the model above,
+     * is provided via
+     * {@link ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS}.
+     * These are used to determine the maximum frame rate / minimum frame
+     * duration that is possible for a given stream configuration.</p>
+     * <p>Specifically, the application can use the following rules to
+     * determine the minimum frame duration it can request from the camera
+     * device:</p>
+     * <ol>
+     * <li>Let the set of currently configured input/output streams
+     * be called <code>S</code>.</li>
+     * <li>Find the minimum frame durations for each stream in <code>S</code>, by looking
+     * it up in {@link ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS}
+     * (with its respective size/format). Let this set of frame durations be
+     * called <code>F</code>.</li>
+     * <li>For any given request <code>R</code>, the minimum frame duration allowed
+     * for <code>R</code> is the maximum out of all values in <code>F</code>. Let the streams
+     * used in <code>R</code> be called <code>S_r</code>.</li>
+     * </ol>
+     * <p>If none of the streams in <code>S_r</code> have a stall time (listed in {@link
+     * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS}
+     * using its respective size/format), then the frame duration in <code>F</code>
+     * determines the steady state frame rate that the application will get
+     * if it uses <code>R</code> as a repeating request. Let this special kind of
+     * request be called <code>Rsimple</code>.</p>
+     * <p>A repeating request <code>Rsimple</code> can be <em>occasionally</em> interleaved
+     * by a single capture of a new request <code>Rstall</code> (which has at least
+     * one in-use stream with a non-0 stall time); if <code>Rstall</code> has the
+     * same minimum frame duration, this will not cause a frame rate loss
+     * if all buffers from the previous <code>Rstall</code> have already been
+     * delivered.</p>
+     * <p>For more details about stalling, see
+     * {@link ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS}.</p>
+     * <p>This control is only effective if ACAMERA_CONTROL_AE_MODE or ACAMERA_CONTROL_MODE is set to
+     * OFF; otherwise the auto-exposure algorithm will override this value.</p>
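+     * <p>For illustration only, a minimal C sketch of rules 1-3 above: the
+     * minimum frame duration for a request is the maximum of the per-stream
+     * minimums in <code>F</code> (the function name is hypothetical; the caller
+     * is assumed to have looked the per-stream values up in
+     * ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS):</p>
+     * <pre><code>#include &lt;stddef.h&gt;
+     * #include &lt;stdint.h&gt;
+     * int64_t requestMinFrameDuration(const int64_t* streamMinDurationsNs, size_t n) {
+     *     int64_t minDuration = 0;
+     *     for (size_t i = 0; i &lt; n; i++) {
+     *         if (streamMinDurationsNs[i] &gt; minDuration) {
+     *             minDuration = streamMinDurationsNs[i];
+     *         }
+     *     }
+     *     return minDuration;
+     * }
+     * </code></pre>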
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_MODE
+     */
+    ACAMERA_SENSOR_FRAME_DURATION =                             // int64
+            ACAMERA_SENSOR_START + 1,
+    /**
+     * <p>The amount of gain applied to sensor data
+     * before processing.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>The sensitivity is the standard ISO sensitivity value,
+     * as defined in ISO 12232:2006.</p>
+     * <p>The sensitivity must be within ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE, and
+     * if it is less than ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY, the camera device
+     * is guaranteed to use only analog amplification for applying the gain.</p>
+     * <p>If the camera device cannot apply the exact sensitivity
+     * requested, it will reduce the gain to the nearest supported
+     * value. The final sensitivity used will be available in the
+     * output capture result.</p>
+     * <p>This control is only effective if ACAMERA_CONTROL_AE_MODE or ACAMERA_CONTROL_MODE is set to
+     * OFF; otherwise the auto-exposure algorithm will override this value.</p>
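+     * <p>For illustration only, a minimal C sketch clamping a requested ISO to
+     * the supported range and flagging whether it stays in the analog-only
+     * region described above (the function name is hypothetical):</p>
+     * <pre><code>#include &lt;stdbool.h&gt;
+     * #include &lt;stdint.h&gt;
+     * // rangeMin/rangeMax from ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE;
+     * // maxAnalog from ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY.
+     * int32_t clampSensitivity(int32_t requested, int32_t rangeMin,
+     *                          int32_t rangeMax, int32_t maxAnalog,
+     *                          bool* analogOnly) {
+     *     int32_t iso = requested;
+     *     if (iso &lt; rangeMin) iso = rangeMin;
+     *     if (iso &gt; rangeMax) iso = rangeMax;
+     *     *analogOnly = (iso &lt; maxAnalog);
+     *     return iso;
+     * }
+     * </code></pre>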
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE
+     * @see ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY
+     */
+    ACAMERA_SENSOR_SENSITIVITY =                                // int32
+            ACAMERA_SENSOR_START + 2,
+    /**
+     * <p>The standard reference illuminant used as the scene light source when
+     * calculating the ACAMERA_SENSOR_COLOR_TRANSFORM1,
+     * ACAMERA_SENSOR_CALIBRATION_TRANSFORM1, and
+     * ACAMERA_SENSOR_FORWARD_MATRIX1 matrices.</p>
+     *
+     * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM1
+     * @see ACAMERA_SENSOR_COLOR_TRANSFORM1
+     * @see ACAMERA_SENSOR_FORWARD_MATRIX1
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>The values in this key correspond to the values defined for the
+     * EXIF LightSource tag. These illuminants are standard light sources
+     * that are often used in calibrating camera devices.</p>
+     * <p>If this key is present, then ACAMERA_SENSOR_COLOR_TRANSFORM1,
+     * ACAMERA_SENSOR_CALIBRATION_TRANSFORM1, and
+     * ACAMERA_SENSOR_FORWARD_MATRIX1 will also be present.</p>
+     * <p>Some devices may choose to provide a second set of calibration
+     * information for improved quality, including
+     * ACAMERA_SENSOR_REFERENCE_ILLUMINANT2 and its corresponding matrices.</p>
+     *
+     * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM1
+     * @see ACAMERA_SENSOR_COLOR_TRANSFORM1
+     * @see ACAMERA_SENSOR_FORWARD_MATRIX1
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT2
+     */
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1 =                      // byte (enum)
+            ACAMERA_SENSOR_START + 3,
+    /**
+     * <p>The standard reference illuminant used as the scene light source when
+     * calculating the ACAMERA_SENSOR_COLOR_TRANSFORM2,
+     * ACAMERA_SENSOR_CALIBRATION_TRANSFORM2, and
+     * ACAMERA_SENSOR_FORWARD_MATRIX2 matrices.</p>
+     *
+     * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM2
+     * @see ACAMERA_SENSOR_COLOR_TRANSFORM2
+     * @see ACAMERA_SENSOR_FORWARD_MATRIX2
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>See ACAMERA_SENSOR_REFERENCE_ILLUMINANT1 for more details.</p>
+     * <p>If this key is present, then ACAMERA_SENSOR_COLOR_TRANSFORM2,
+     * ACAMERA_SENSOR_CALIBRATION_TRANSFORM2, and
+     * ACAMERA_SENSOR_FORWARD_MATRIX2 will also be present.</p>
+     *
+     * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM2
+     * @see ACAMERA_SENSOR_COLOR_TRANSFORM2
+     * @see ACAMERA_SENSOR_FORWARD_MATRIX2
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
+     */
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT2 =                      // byte
+            ACAMERA_SENSOR_START + 4,
+    /**
+     * <p>A per-device calibration transform matrix that maps from the
+     * reference sensor colorspace to the actual device sensor colorspace.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This matrix is used to correct for per-device variations in the
+     * sensor colorspace, and is used for processing raw buffer data.</p>
+     * <p>The matrix is expressed as a 3x3 matrix in row-major-order, and
+     * contains a per-device calibration transform that maps colors
+     * from reference sensor color space (i.e. the "golden module"
+     * colorspace) into this camera device's native sensor color
+     * space under the first reference illuminant
+     * (ACAMERA_SENSOR_REFERENCE_ILLUMINANT1).</p>
+     *
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
+     */
+    ACAMERA_SENSOR_CALIBRATION_TRANSFORM1 =                     // rational[3*3]
+            ACAMERA_SENSOR_START + 5,
+    /**
+     * <p>A per-device calibration transform matrix that maps from the
+     * reference sensor colorspace to the actual device sensor colorspace
+     * (this is the colorspace of the raw buffer data).</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This matrix is used to correct for per-device variations in the
+     * sensor colorspace, and is used for processing raw buffer data.</p>
+     * <p>The matrix is expressed as a 3x3 matrix in row-major-order, and
+     * contains a per-device calibration transform that maps colors
+     * from reference sensor color space (i.e. the "golden module"
+     * colorspace) into this camera device's native sensor color
+     * space under the second reference illuminant
+     * (ACAMERA_SENSOR_REFERENCE_ILLUMINANT2).</p>
+     * <p>This matrix will only be present if the second reference
+     * illuminant is present.</p>
+     *
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT2
+     */
+    ACAMERA_SENSOR_CALIBRATION_TRANSFORM2 =                     // rational[3*3]
+            ACAMERA_SENSOR_START + 6,
+    /**
+     * <p>A matrix that transforms color values from CIE XYZ color space to
+     * reference sensor color space.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This matrix is used to convert from the standard CIE XYZ color
+     * space to the reference sensor colorspace, and is used when processing
+     * raw buffer data.</p>
+     * <p>The matrix is expressed as a 3x3 matrix in row-major-order, and
+     * contains a color transform matrix that maps colors from the CIE
+     * XYZ color space to the reference sensor color space (i.e. the
+     * "golden module" colorspace) under the first reference illuminant
+     * (ACAMERA_SENSOR_REFERENCE_ILLUMINANT1).</p>
+     * <p>The white points chosen in both the reference sensor color space
+     * and the CIE XYZ colorspace when calculating this transform will
+     * match the standard white point for the first reference illuminant
+     * (i.e. no chromatic adaptation will be applied by this transform).</p>
+     *
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
+     */
+    ACAMERA_SENSOR_COLOR_TRANSFORM1 =                           // rational[3*3]
+            ACAMERA_SENSOR_START + 7,
+    /**
+     * <p>A matrix that transforms color values from CIE XYZ color space to
+     * reference sensor color space.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This matrix is used to convert from the standard CIE XYZ color
+     * space to the reference sensor colorspace, and is used when processing
+     * raw buffer data.</p>
+     * <p>The matrix is expressed as a 3x3 matrix in row-major-order, and
+     * contains a color transform matrix that maps colors from the CIE
+     * XYZ color space to the reference sensor color space (i.e. the
+     * "golden module" colorspace) under the second reference illuminant
+     * (ACAMERA_SENSOR_REFERENCE_ILLUMINANT2).</p>
+     * <p>The white points chosen in both the reference sensor color space
+     * and the CIE XYZ colorspace when calculating this transform will
+     * match the standard white point for the second reference illuminant
+     * (i.e. no chromatic adaptation will be applied by this transform).</p>
+     * <p>This matrix will only be present if the second reference
+     * illuminant is present.</p>
+     *
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT2
+     */
+    ACAMERA_SENSOR_COLOR_TRANSFORM2 =                           // rational[3*3]
+            ACAMERA_SENSOR_START + 8,
+    /**
+     * <p>A matrix that transforms white balanced camera colors from the reference
+     * sensor colorspace to the CIE XYZ colorspace with a D50 whitepoint.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This matrix is used to convert to the standard CIE XYZ colorspace, and
+     * is used when processing raw buffer data.</p>
+     * <p>This matrix is expressed as a 3x3 matrix in row-major-order, and contains
+     * a color transform matrix that maps white balanced colors from the
+     * reference sensor color space to the CIE XYZ color space with a D50 white
+     * point.</p>
+     * <p>Under the first reference illuminant (ACAMERA_SENSOR_REFERENCE_ILLUMINANT1)
+     * this matrix is chosen so that the standard white point for this reference
+     * illuminant in the reference sensor colorspace is mapped to D50 in the
+     * CIE XYZ colorspace.</p>
+     *
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
+     */
+    ACAMERA_SENSOR_FORWARD_MATRIX1 =                            // rational[3*3]
+            ACAMERA_SENSOR_START + 9,
+    /**
+     * <p>A matrix that transforms white balanced camera colors from the reference
+     * sensor colorspace to the CIE XYZ colorspace with a D50 whitepoint.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This matrix is used to convert to the standard CIE XYZ colorspace, and
+     * is used when processing raw buffer data.</p>
+     * <p>This matrix is expressed as a 3x3 matrix in row-major-order, and contains
+     * a color transform matrix that maps white balanced colors from the
+     * reference sensor color space to the CIE XYZ color space with a D50 white
+     * point.</p>
+     * <p>Under the second reference illuminant (ACAMERA_SENSOR_REFERENCE_ILLUMINANT2)
+     * this matrix is chosen so that the standard white point for this reference
+     * illuminant in the reference sensor colorspace is mapped to D50 in the
+     * CIE XYZ colorspace.</p>
+     * <p>This matrix will only be present if the second reference
+     * illuminant is present.</p>
+     *
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT2
+     */
+    ACAMERA_SENSOR_FORWARD_MATRIX2 =                            // rational[3*3]
+            ACAMERA_SENSOR_START + 10,
+    /**
+     * <p>A fixed black level offset for each of the color filter arrangement
+     * (CFA) mosaic channels.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This key specifies the zero light value for each of the CFA mosaic
+     * channels in the camera sensor.  The maximal value output by the
+     * sensor is represented by the value in ACAMERA_SENSOR_INFO_WHITE_LEVEL.</p>
+     * <p>The values are given in the same order as channels listed for the CFA
+     * layout key (see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT), i.e. the
+     * nth value given corresponds to the black level offset for the nth
+     * color channel listed in the CFA.</p>
+     * <p>The black level values of captured images may vary for different
+     * capture settings (e.g., ACAMERA_SENSOR_SENSITIVITY). This key
+     * represents a coarse approximation in such cases. It is recommended to
+     * use ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL or use pixels from
+     * ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS directly for captures when
+     * supported by the camera device, which provides more accurate black
+     * level values. For raw capture in particular, it is recommended to use
+     * pixels from ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS to calculate black
+     * level values for each frame.</p>
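+     * <p>A minimal sketch of using such an offset to normalize one raw sample;
+     * the helper name and parameters are illustrative only:</p>
+     * <pre><code>// raw: sensor sample; black: this channel's offset from
+     * // ACAMERA_SENSOR_BLACK_LEVEL_PATTERN; white: ACAMERA_SENSOR_INFO_WHITE_LEVEL.
+     * float normalizeRawSample(int32_t raw, int32_t black, int32_t white) {
+     *     float v = (float)(raw - black) / (float)(white - black);
+     *     return v &lt; 0.0f ? 0.0f : (v &gt; 1.0f ? 1.0f : v);  // clamp to [0, 1]
+     * }
+     * </code></pre>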
+     *
+     * @see ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL
+     * @see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
+     * @see ACAMERA_SENSOR_INFO_WHITE_LEVEL
+     * @see ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_SENSOR_BLACK_LEVEL_PATTERN =                        // int32[4]
+            ACAMERA_SENSOR_START + 12,
+    /**
+     * <p>Maximum sensitivity that is implemented
+     * purely through analog gain.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>For ACAMERA_SENSOR_SENSITIVITY values less than or
+     * equal to this, all applied gain must be analog. For
+     * values above this, the gain applied can be a mix of analog and
+     * digital.</p>
+     *
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY =                     // int32
+            ACAMERA_SENSOR_START + 13,
+    /**
+     * <p>Clockwise angle through which the output image needs to be rotated to be
+     * upright on the device screen in its native orientation.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Also defines the direction of rolling shutter readout, which is from top to bottom in
+     * the sensor's coordinate system.</p>
+     */
+    ACAMERA_SENSOR_ORIENTATION =                                // int32
+            ACAMERA_SENSOR_START + 14,
+    /**
+     * <p>Time at start of exposure of first
+     * row of the image sensor active array, in nanoseconds.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>The timestamps are also included in all image
+     * buffers produced for the same capture, and will be identical
+     * on all the outputs.</p>
+     * <p>When ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE <code>==</code> UNKNOWN,
+     * the timestamps measure time since an unspecified starting point,
+     * and are monotonically increasing. They can be compared with the
+     * timestamps for other captures from the same camera device, but are
+     * not guaranteed to be comparable to any other time source.</p>
+     * <p>When ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE <code>==</code> REALTIME, the
+     * timestamps measure time in the same timebase as
+     * <a href="https://developer.android.com/reference/android/os/SystemClock.html#elapsedRealtimeNanos">elapsedRealtimeNanos</a>
+     * (or CLOCK_BOOTTIME), and they can
+     * be compared to other timestamps from other subsystems that
+     * are using that base.</p>
+     * <p>For reprocessing, the timestamp will match the start of exposure of
+     * the input image, i.e. {@link CaptureResult#SENSOR_TIMESTAMP the
+     * timestamp} in the TotalCaptureResult that was used to create the
+     * reprocess capture request.</p>
+     *
+     * @see ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE
+     */
+    ACAMERA_SENSOR_TIMESTAMP =                                  // int64
+            ACAMERA_SENSOR_START + 16,
+    /**
+     * <p>The estimated camera neutral color in the native sensor colorspace at
+     * the time of capture.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>This value gives the neutral color point encoded as an RGB value in the
+     * native sensor color space.  The neutral color point indicates the
+     * currently estimated white point of the scene illumination.  It can be
+     * used to interpolate between the provided color transforms when
+     * processing raw sensor data.</p>
+     * <p>The order of the values is R, G, B, with R in the lowest index.</p>
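+     * <p>A rough sketch of blending two 3x3 color transforms with a
+     * caller-supplied weight; deriving that weight from this neutral point and
+     * the reference illuminants is described in the DNG specification and is
+     * not shown here:</p>
+     * <pre><code>// t1, t2: row-major COLOR_TRANSFORM1/2; w in [0, 1]; out: blended result.
+     * void blendTransforms(const float t1[9], const float t2[9],
+     *                      float w, float out[9]) {
+     *     for (int i = 0; i &lt; 9; ++i) {
+     *         out[i] = (1.0f - w) * t1[i] + w * t2[i];
+     *     }
+     * }
+     * </code></pre>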
+     */
+    ACAMERA_SENSOR_NEUTRAL_COLOR_POINT =                        // rational[3]
+            ACAMERA_SENSOR_START + 18,
+    /**
+     * <p>Noise model coefficients for each CFA mosaic channel.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>This key contains two noise model coefficients for each CFA channel
+     * corresponding to the sensor amplification (S) and sensor readout
+     * noise (O).  These are given as pairs of coefficients for each channel
+     * in the same order as channels listed for the CFA layout key
+     * (see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT).  This is
+     * represented as an array of Pair&lt;Double, Double&gt;, where
+     * the first member of the Pair at index n is the S coefficient and the
+     * second member is the O coefficient for the nth color channel in the CFA.</p>
+     * <p>These coefficients are used in a two parameter noise model to describe
+     * the amount of noise present in the image for each CFA channel.  The
+     * noise model used here is:</p>
+     * <p>N(x) = sqrt(Sx + O)</p>
+     * <p>Where x represents the recorded signal of a CFA channel normalized to
+     * the range [0, 1], and S and O are the noise model coefficients for
+     * that channel.</p>
+     * <p>A more detailed description of the noise model can be found in the
+     * Adobe DNG specification for the NoiseProfile tag.</p>
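+     * <p>A minimal sketch of evaluating this model for one channel (the helper
+     * name is illustrative):</p>
+     * <pre><code>#include &lt;math.h&gt;
+     * // x: signal normalized to [0, 1]; s, o: the S and O coefficients
+     * // reported for that CFA channel.
+     * double estimateNoise(double x, double s, double o) {
+     *     return sqrt(s * x + o);  // N(x) = sqrt(Sx + O)
+     * }
+     * </code></pre>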
+     *
+     * @see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
+     */
+    ACAMERA_SENSOR_NOISE_PROFILE =                              // double[2*CFA Channels]
+            ACAMERA_SENSOR_START + 19,
+    /**
+     * <p>The worst-case divergence between Bayer green channels.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>This value is an estimate of the worst case split between the
+     * Bayer green channels in the red and blue rows in the sensor color
+     * filter array.</p>
+     * <p>The green split is calculated as follows:</p>
+     * <ol>
+     * <li>A 5x5 pixel (or larger) window W within the active sensor array is
+     * chosen. The term 'pixel' here is taken to mean a group of 4 Bayer
+     * mosaic channels (R, Gr, Gb, B).  The location and size of the window
+     * are implementation-defined, and should be chosen to provide a
+     * green split estimate that is representative of the entire image
+     * for this camera sensor and can be calculated quickly.</li>
+     * <li>The arithmetic mean of the green channels from the red
+     * rows (mean_Gr) within W is computed.</li>
+     * <li>The arithmetic mean of the green channels from the blue
+     * rows (mean_Gb) within W is computed.</li>
+     * <li>The maximum ratio R of the two means is computed as follows:
+     * <code>R = max((mean_Gr + 1)/(mean_Gb + 1), (mean_Gb + 1)/(mean_Gr + 1))</code></li>
+     * </ol>
+     * <p>The ratio R is the green split divergence reported for this property,
+     * which represents how much the green channels differ in the mosaic
+     * pattern.  This value is typically used to determine the treatment of
+     * the green mosaic channels when demosaicing.</p>
+     * <p>The green split value can be roughly interpreted as follows:</p>
+     * <ul>
+     * <li>R &lt; 1.03 is a negligible split (&lt;3% divergence).</li>
+     * <li>1.03 &lt;= R &lt;= 1.20 will require some software
+     * correction to avoid demosaic errors (3-20% divergence).</li>
+     * <li>R &gt; 1.20 will require strong software correction to produce
+     * a usable image (&gt;20% divergence).</li>
+     * </ul>
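+     * <p>A small sketch of computing R from the two window means, following the
+     * steps above (the helper name is illustrative):</p>
+     * <pre><code>// meanGr, meanGb: arithmetic means of the green channels in the
+     * // red and blue rows of the window W.
+     * float greenSplit(float meanGr, float meanGb) {
+     *     float a = (meanGr + 1.0f) / (meanGb + 1.0f);
+     *     float b = (meanGb + 1.0f) / (meanGr + 1.0f);
+     *     return a &gt; b ? a : b;  // R = max(a, b)
+     * }
+     * </code></pre>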
+     */
+    ACAMERA_SENSOR_GREEN_SPLIT =                                // float
+            ACAMERA_SENSOR_START + 22,
+    /**
+     * <p>A pixel <code>[R, G_even, G_odd, B]</code> that supplies the test pattern
+     * when ACAMERA_SENSOR_TEST_PATTERN_MODE is SOLID_COLOR.</p>
+     *
+     * @see ACAMERA_SENSOR_TEST_PATTERN_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Each color channel is treated as an unsigned 32-bit integer.
+     * The camera device then uses the X most significant bits, where X
+     * is the bit depth of its Bayer raw sensor output.</p>
+     * <p>For example, a sensor with RAW10 Bayer output would use the
+     * 10 most significant bits from each color channel.</p>
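+     * <p>A minimal sketch of how a sensor might derive the sample it emits,
+     * assuming a hypothetical per-channel helper:</p>
+     * <pre><code>// channelValue: one of [R, G_even, G_odd, B]; bits: Bayer raw bit
+     * // depth of the sensor (e.g. 10 for RAW10).
+     * uint32_t testPatternSample(uint32_t channelValue, int bits) {
+     *     return channelValue &gt;&gt; (32 - bits);  // keep the most significant bits
+     * }
+     * </code></pre>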
+     */
+    ACAMERA_SENSOR_TEST_PATTERN_DATA =                          // int32[4]
+            ACAMERA_SENSOR_START + 23,
+    /**
+     * <p>When enabled, the sensor sends a test pattern instead of
+     * doing a real exposure from the camera.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>When a test pattern is enabled, all manual sensor controls specified
+     * by ACAMERA_SENSOR_* will be ignored. All other controls should
+     * work as normal.</p>
+     * <p>For example, if manual flash is enabled, flash firing should still
+     * occur, and the test pattern should remain unmodified, since the flash
+     * would not actually affect it.</p>
+     * <p>Defaults to OFF.</p>
+     */
+    ACAMERA_SENSOR_TEST_PATTERN_MODE =                          // int32 (enum)
+            ACAMERA_SENSOR_START + 24,
+    /**
+     * <p>List of sensor test pattern modes for ACAMERA_SENSOR_TEST_PATTERN_MODE
+     * supported by this camera device.</p>
+     *
+     * @see ACAMERA_SENSOR_TEST_PATTERN_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Defaults to OFF, and always includes OFF if defined.</p>
+     */
+    ACAMERA_SENSOR_AVAILABLE_TEST_PATTERN_MODES =               // int32[n]
+            ACAMERA_SENSOR_START + 25,
+    /**
+     * <p>Duration between the start of first row exposure
+     * and the start of last row exposure.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>This is the exposure time skew between the first and last
+     * row exposure start times. The first row and the last row are
+     * the first and last rows inside of the
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
+     * <p>For typical camera sensors that use rolling shutters, this is also equivalent
+     * to the frame readout time.</p>
+     *
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_SENSOR_ROLLING_SHUTTER_SKEW =                       // int64
+            ACAMERA_SENSOR_START + 26,
+    /**
+     * <p>List of disjoint rectangles indicating the sensor
+     * optically shielded black pixel regions.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>In most camera sensors, the active array is surrounded by some
+     * optically shielded pixel areas. By blocking light, these pixels
+     * provide a reliable black reference for black level compensation
+     * in the active array region.</p>
+     * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
+     * <p>This key provides a list of disjoint rectangles specifying the
+     * regions of optically shielded (with metal shield) black pixel
+     * regions if the camera device is capable of reading out these black
+     * pixels in the output raw images. In comparison to the fixed black
+     * level values reported by ACAMERA_SENSOR_BLACK_LEVEL_PATTERN, this key
+     * may provide a more accurate way for the application to calculate
+     * the black level of each captured raw image.</p>
+     * <p>When this key is reported, the ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL and
+     * ACAMERA_SENSOR_DYNAMIC_WHITE_LEVEL will also be reported.</p>
+     *
+     * @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
+     * @see ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL
+     * @see ACAMERA_SENSOR_DYNAMIC_WHITE_LEVEL
+     */
+    ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS =                      // int32[4*num_regions]
+            ACAMERA_SENSOR_START + 27,
+    /**
+     * <p>A per-frame dynamic black level offset for each of the color filter
+     * arrangement (CFA) mosaic channels.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Camera sensor black levels may vary dramatically for different
+     * capture settings (e.g. ACAMERA_SENSOR_SENSITIVITY). The fixed black
+     * level reported by ACAMERA_SENSOR_BLACK_LEVEL_PATTERN may be too
+     * inaccurate to represent the actual value on a per-frame basis. The
+     * camera device internal pipeline relies on reliable black level values
+     * to process the raw images appropriately. To get the best image
+     * quality, the camera device may choose to estimate the per-frame black
+     * level values either based on optically shielded black regions
+     * (ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS) or its internal model.</p>
+     * <p>This key reports the camera device's estimated per-frame zero light
+     * value for each of the CFA mosaic channels in the camera sensor. The
+     * ACAMERA_SENSOR_BLACK_LEVEL_PATTERN may only represent a coarse
+     * approximation of the actual black level values. This value is the
+     * black level used in the camera device's internal image processing pipeline,
+     * and is generally more accurate than the fixed black level values.
+     * However, since these values are estimated by the camera device, they
+     * may not be as accurate as the black level values calculated from the
+     * optical black pixels reported by ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS.</p>
+     * <p>The values are given in the same order as channels listed for the CFA
+     * layout key (see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT), i.e. the
+     * nth value given corresponds to the black level offset for the nth
+     * color channel listed in the CFA.</p>
+     * <p>This key will be available if ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS is
+     * available or the camera device advertises this key via
+     * {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS}.</p>
+     *
+     * @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
+     * @see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
+     * @see ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL =                        // float[4]
+            ACAMERA_SENSOR_START + 28,
+    /**
+     * <p>Maximum raw value output by sensor for this frame.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Since the ACAMERA_SENSOR_BLACK_LEVEL_PATTERN may change for different
+     * capture settings (e.g., ACAMERA_SENSOR_SENSITIVITY), the white
+     * level will change accordingly. This key is similar to
+     * ACAMERA_SENSOR_INFO_WHITE_LEVEL, but specifies the camera device
+     * estimated white level for each frame.</p>
+     * <p>This key will be available if ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS is
+     * available or the camera device advertises this key via
+     * {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS}.</p>
+     *
+     * @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
+     * @see ACAMERA_SENSOR_INFO_WHITE_LEVEL
+     * @see ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_SENSOR_DYNAMIC_WHITE_LEVEL =                        // int32
+            ACAMERA_SENSOR_START + 29,
+    ACAMERA_SENSOR_END,
+
+    /**
+     * <p>The area of the image sensor which corresponds to active pixels after any geometric
+     * distortion correction has been applied.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This is the rectangle representing the size of the active region of the sensor (i.e.
+     * the region that actually receives light from the scene) after any geometric correction
+     * has been applied, and should be treated as the maximum size in pixels of any of the
+     * image output formats aside from the raw formats.</p>
+     * <p>This rectangle is defined relative to the full pixel array; (0,0) is the top-left of
+     * the full pixel array, and the size of the full pixel array is given by
+     * ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE.</p>
+     * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
+     * <p>The coordinate system for most other keys that list pixel coordinates, including
+     * ACAMERA_SCALER_CROP_REGION, is defined relative to the active array rectangle given in
+     * this field, with <code>(0, 0)</code> being the top-left of this rectangle.</p>
+     * <p>The active array may be smaller than the full pixel array, since the full array may
+     * include black calibration pixels or other inactive regions, and geometric correction
+     * resulting in scaling or cropping may have been applied.</p>
+     *
+     * @see ACAMERA_SCALER_CROP_REGION
+     * @see ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE
+     */
+    ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE =                     // int32[4]
+            ACAMERA_SENSOR_INFO_START,
+    /**
+     * <p>Range of sensitivities for ACAMERA_SENSOR_SENSITIVITY supported by this
+     * camera device.</p>
+     *
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>The values are the standard ISO sensitivity values,
+     * as defined in ISO 12232:2006.</p>
+     */
+    ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE =                     // int32[2]
+            ACAMERA_SENSOR_INFO_START + 1,
+    /**
+     * <p>The arrangement of color filters on the sensor;
+     * represents the colors in the top-left 2x2 section of
+     * the sensor, in reading order.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     */
+    ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT =              // byte (enum)
+            ACAMERA_SENSOR_INFO_START + 2,
+    /**
+     * <p>The range of image exposure times for ACAMERA_SENSOR_EXPOSURE_TIME supported
+     * by this camera device.</p>
+     *
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     */
+    ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE =                   // int64[2]
+            ACAMERA_SENSOR_INFO_START + 3,
+    /**
+     * <p>The maximum possible frame duration (minimum frame rate) for
+     * ACAMERA_SENSOR_FRAME_DURATION that is supported by this camera device.</p>
+     *
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Attempting to use frame durations beyond the maximum will result in the frame
+     * duration being clipped to the maximum. See that control for a full definition of frame
+     * durations.</p>
+     * <p>Refer to {@link
+     * ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS}
+     * for the minimum frame duration values.</p>
+     */
+    ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION =                    // int64
+            ACAMERA_SENSOR_INFO_START + 4,
+    /**
+     * <p>The physical dimensions of the full pixel
+     * array.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This is the physical size of the sensor pixel
+     * array defined by ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE.</p>
+     *
+     * @see ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE
+     */
+    ACAMERA_SENSOR_INFO_PHYSICAL_SIZE =                         // float[2]
+            ACAMERA_SENSOR_INFO_START + 5,
+    /**
+     * <p>Dimensions of the full pixel array, possibly
+     * including black calibration pixels.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>The pixel count of the full pixel array of the image sensor, which covers
+     * ACAMERA_SENSOR_INFO_PHYSICAL_SIZE area.  This represents the full pixel dimensions of
+     * the raw buffers produced by this sensor.</p>
+     * <p>If a camera device supports raw sensor formats, either this or
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE gives the maximum dimensions for
+     * the raw output formats listed in ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS (this
+     * depends on whether or not the image sensor returns buffers containing pixels that are
+     * not part of the active array region for black level calibration or other purposes).</p>
+     * <p>Some parts of the full pixel array may not receive light from the scene,
+     * or be otherwise inactive.  The ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE key
+     * defines the rectangle of active pixels that will be included in processed image
+     * formats.</p>
+     *
+     * @see ACAMERA_SENSOR_INFO_PHYSICAL_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE =                      // int32[2]
+            ACAMERA_SENSOR_INFO_START + 6,
+    /**
+     * <p>Maximum raw value output by sensor.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This specifies the fully-saturated encoding level for the raw
+     * sample values from the sensor.  This is typically caused by the
+     * sensor becoming highly non-linear or clipping. The minimum for
+     * each channel is specified by the offset in the
+     * ACAMERA_SENSOR_BLACK_LEVEL_PATTERN key.</p>
+     * <p>The white level is typically determined either by sensor bit depth
+     * (8-14 bits is expected), or by the point where the sensor response
+     * becomes too non-linear to be useful.  The default value for this is the
+     * maximum representable value for a 16-bit raw sample (2^16 - 1).</p>
+     * <p>The white level values of captured images may vary for different
+     * capture settings (e.g., ACAMERA_SENSOR_SENSITIVITY). This key
+     * represents a coarse approximation in such cases. It is recommended
+     * to use ACAMERA_SENSOR_DYNAMIC_WHITE_LEVEL for captures when supported
+     * by the camera device, which provides more accurate white level values.</p>
+     *
+     * @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
+     * @see ACAMERA_SENSOR_DYNAMIC_WHITE_LEVEL
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_SENSOR_INFO_WHITE_LEVEL =                           // int32
+            ACAMERA_SENSOR_INFO_START + 7,
+    /**
+     * <p>The time base source for sensor capture start timestamps.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>The timestamps provided for captures are always in nanoseconds and monotonic, but
+     * may not be based on a time source that can be compared to other system time sources.</p>
+     * <p>This characteristic defines the source for the timestamps, and therefore whether they
+     * can be compared against other system time sources/timestamps.</p>
+     */
+    ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE =                      // byte (enum)
+            ACAMERA_SENSOR_INFO_START + 8,
+    /**
+     * <p>Whether the RAW images output from this camera device are subject to
+     * lens shading correction.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If TRUE, all images produced by the camera device in the RAW image formats will
+     * have lens shading correction already applied to them. If FALSE, the images will
+     * not be adjusted for lens shading correction.
+     * See android.request.maxNumOutputRaw for a list of RAW image formats.</p>
+     * <p>This key will be <code>null</code> for all devices that do not report this information.
+     * Devices with RAW capability will always report this information in this key.</p>
+     */
+    ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED =                  // byte (enum)
+            ACAMERA_SENSOR_INFO_START + 9,
+    /**
+     * <p>The area of the image sensor which corresponds to active pixels prior to the
+     * application of any geometric distortion correction.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
+     * <p>This is the rectangle representing the size of the active region of the sensor (i.e.
+     * the region that actually receives light from the scene) before any geometric correction
+     * has been applied, and should be treated as the active region rectangle for any of the
+     * raw formats.  All metadata associated with raw processing (e.g. the lens shading
+     * correction map, and radial distortion fields) treats the top, left of this rectangle as
+     * the origin, (0,0).</p>
+     * <p>The size of this region determines the maximum field of view and the maximum number of
+     * pixels that an image from this sensor can contain, prior to the application of
+     * geometric distortion correction. The effective maximum pixel dimensions of a
+     * post-distortion-corrected image are given by the ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * field, and the effective maximum field of view for a post-distortion-corrected image
+     * can be calculated by applying the geometric distortion correction fields to this
+     * rectangle, and cropping to the rectangle given in ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
+     * <p>E.g. to calculate the position of a pixel, (x,y), in a processed YUV output image with
+     * the dimensions in ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, given the position of a pixel,
+     * (x', y'), in the raw pixel array with dimensions given in
+     * ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE:</p>
+     * <ol>
+     * <li>Choose a pixel (x', y') within the active array region of the raw buffer given in
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, otherwise this pixel is considered
+     * to be outside of the FOV, and will not be shown in the processed output image.</li>
+     * <li>Apply geometric distortion correction to get the post-distortion pixel coordinate,
+     * (x_i, y_i). When applying geometric correction metadata, note that metadata for raw
+     * buffers is defined relative to the top, left of the
+     * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE rectangle.</li>
+     * <li>If the resulting corrected pixel coordinate is within the region given in
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, then the position of this pixel in the
+     * processed output image buffer is <code>(x_i - activeArray.left, y_i - activeArray.top)</code>,
+     * when the top, left coordinate of that buffer is treated as (0, 0).</li>
+     * </ol>
+     * <p>Thus, for pixel x',y' = (25, 25) on a sensor where ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE
+     * is (100,100), ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE is (10, 10, 100, 100),
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE is (20, 20, 80, 80), and the geometric distortion
+     * correction doesn't change the pixel coordinate, the resulting pixel selected in
+     * pixel coordinates would be x,y = (25, 25) relative to the top,left of the raw buffer
+     * with dimensions given in ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE, and would be (5, 5)
+     * relative to the top,left of post-processed YUV output buffer with dimensions given in
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
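+     * <p>A sketch of the final step above, mapping a distortion-corrected
+     * coordinate into the output buffer (the correction itself is omitted; the
+     * helper name is illustrative):</p>
+     * <pre><code>// xi, yi: post-distortion coordinates; active: the rectangle from
+     * // ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE as (left, top, width, height).
+     * // Returns 0 and writes (outX, outY) when the pixel lands in the output.
+     * int mapToOutput(int xi, int yi, const int active[4],
+     *                 int* outX, int* outY) {
+     *     if (xi &lt; active[0] || yi &lt; active[1] ||
+     *         xi &gt;= active[0] + active[2] || yi &gt;= active[1] + active[3]) {
+     *         return -1;  // outside the FOV of processed outputs
+     *     }
+     *     *outX = xi - active[0];
+     *     *outY = yi - active[1];
+     *     return 0;
+     * }
+     * </code></pre>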
+     * <p>The currently supported fields that correct for geometric distortion are:</p>
+     * <ol>
+     * <li>ACAMERA_LENS_RADIAL_DISTORTION.</li>
+     * </ol>
+     * <p>If all of the geometric distortion fields are no-ops, this rectangle will be the same
+     * as the post-distortion-corrected rectangle given in
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
+     * <p>This rectangle is defined relative to the full pixel array; (0,0) is the top-left of
+     * the full pixel array, and the size of the full pixel array is given by
+     * ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE.</p>
+     * <p>The pre-correction active array may be smaller than the full pixel array, since the
+     * full array may include black calibration pixels or other inactive regions.</p>
+     *
+     * @see ACAMERA_LENS_RADIAL_DISTORTION
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE =      // int32[4]
+            ACAMERA_SENSOR_INFO_START + 10,
+    ACAMERA_SENSOR_INFO_END,
+
+    /**
+     * <p>Quality of lens shading correction applied
+     * to the image data.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>When set to OFF mode, no lens shading correction will be applied by the
+     * camera device, and an identity lens shading map data will be provided
+     * if <code>ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE == ON</code>. For example, for a lens
+     * shading map with size <code>[ 4, 3 ]</code>,
+     * the output android.statistics.lensShadingCorrectionMap for this case will be the identity
+     * map shown below:</p>
+     * <pre><code>[ 1.0, 1.0, 1.0, 1.0,  1.0, 1.0, 1.0, 1.0,
+     *   1.0, 1.0, 1.0, 1.0,  1.0, 1.0, 1.0, 1.0,
+     *   1.0, 1.0, 1.0, 1.0,  1.0, 1.0, 1.0, 1.0,
+     *   1.0, 1.0, 1.0, 1.0,  1.0, 1.0, 1.0, 1.0,
+     *   1.0, 1.0, 1.0, 1.0,  1.0, 1.0, 1.0, 1.0,
+     *   1.0, 1.0, 1.0, 1.0,  1.0, 1.0, 1.0, 1.0 ]
+     * </code></pre>
+     * <p>When set to other modes, lens shading correction will be applied by the camera
+     * device. Applications can request lens shading map data by setting
+     * ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE to ON, and then the camera device will provide lens
+     * shading map data in android.statistics.lensShadingCorrectionMap; the returned shading map
+     * data will be the one applied by the camera device for this capture request.</p>
+     * <p>The shading map data may depend on the auto-exposure (AE) and AWB statistics; therefore
+     * the reliability of the map data may be affected by the AE and AWB algorithms. When AE and
+     * AWB are in AUTO modes (ACAMERA_CONTROL_AE_MODE <code>!=</code> OFF and ACAMERA_CONTROL_AWB_MODE <code>!=</code>
+     * OFF), to get the best results, it is recommended that applications wait for the AE and AWB
+     * to converge before using the returned shading map data.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AWB_MODE
+     * @see ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE
+     */
+    ACAMERA_SHADING_MODE =                                      // byte (enum)
+            ACAMERA_SHADING_START,
+    /**
+     * <p>List of lens shading modes for ACAMERA_SHADING_MODE that are supported by this camera device.</p>
+     *
+     * @see ACAMERA_SHADING_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This list contains lens shading modes that can be set for the camera device.
+     * Camera devices that support the MANUAL_POST_PROCESSING capability will always
+     * list OFF and FAST mode. This includes all FULL level devices.
+     * LEGACY devices will always only support FAST mode.</p>
+     */
+    ACAMERA_SHADING_AVAILABLE_MODES =                           // byte[n]
+            ACAMERA_SHADING_START + 2,
+    ACAMERA_SHADING_END,
+
+    /**
+     * <p>Operating mode for the face detector
+     * unit.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Whether face detection is enabled, and whether it
+     * should output just the basic fields or the full set of
+     * fields.</p>
+     */
+    ACAMERA_STATISTICS_FACE_DETECT_MODE =                       // byte (enum)
+            ACAMERA_STATISTICS_START,
+    /**
+     * <p>Operating mode for hot pixel map generation.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>If set to <code>true</code>, a hot pixel map is returned in ACAMERA_STATISTICS_HOT_PIXEL_MAP.
+     * If set to <code>false</code>, no hot pixel map will be returned.</p>
+     *
+     * @see ACAMERA_STATISTICS_HOT_PIXEL_MAP
+     */
+    ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE =                     // byte (enum)
+            ACAMERA_STATISTICS_START + 3,
+    /**
+     * <p>List of unique IDs for detected faces.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Each detected face is given a unique ID that is valid for as long as the face is visible
+     * to the camera device.  A face that leaves the field of view and later returns may be
+     * assigned a new ID.</p>
+     * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE == FULL.</p>
+     *
+     * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
+     */
+    ACAMERA_STATISTICS_FACE_IDS =                               // int32[n]
+            ACAMERA_STATISTICS_START + 4,
+    /**
+     * <p>List of landmarks for detected
+     * faces.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>The coordinate system is that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the active array.</p>
+     * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE == FULL.</p>
+     *
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
+     */
+    ACAMERA_STATISTICS_FACE_LANDMARKS =                         // int32[n*6]
+            ACAMERA_STATISTICS_START + 5,
+    /**
+     * <p>List of the bounding rectangles for detected
+     * faces.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
+     * <p>The coordinate system is that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
+     * <code>(0, 0)</code> being the top-left pixel of the active array.</p>
+     * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE != OFF.</p>
+     *
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
+     */
+    ACAMERA_STATISTICS_FACE_RECTANGLES =                        // int32[n*4]
+            ACAMERA_STATISTICS_START + 6,
+    /**
+     * <p>List of the face confidence scores for
+     * detected faces</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE != OFF.</p>
+     *
+     * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
+     */
+    ACAMERA_STATISTICS_FACE_SCORES =                            // byte[n]
+            ACAMERA_STATISTICS_START + 7,
+    /**
+     * <p>The shading map is a low-resolution floating-point map
+     * that lists the coefficients used to correct for vignetting and color shading,
+     * for each Bayer color channel of RAW image data.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>The map provided here is the same map that is used by the camera device to
+     * correct both color shading and vignetting for output non-RAW images.</p>
+     * <p>When there is no lens shading correction applied to RAW
+     * output images (ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED <code>==</code>
+     * false), this map is the complete lens shading correction
+     * map; when there is some lens shading correction applied to
+     * the RAW output image (ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED
+     * <code>==</code> true), this map reports the remaining lens shading
+     * correction map that needs to be applied to get shading
+     * corrected images that match the camera device's output for
+     * non-RAW formats.</p>
+     * <p>For a complete shading correction map, the least shaded
+     * section of the image will have a gain factor of 1; all
+     * other sections will have gains above 1.</p>
+     * <p>When ACAMERA_COLOR_CORRECTION_MODE = TRANSFORM_MATRIX, the map
+     * will take into account the colorCorrection settings.</p>
+     * <p>The shading map is for the entire active pixel array, and is not
+     * affected by the crop region specified in the request. Each shading map
+     * entry is the value of the shading compensation map over a specific
+     * pixel on the sensor.  Specifically, with a (N x M) resolution shading
+     * map, and an active pixel array size (W x H), shading map entry
+     * (x,y) ϵ (0 ... N-1, 0 ... M-1) is the value of the shading map at
+     * pixel ( ((W-1)/(N-1)) * x, ((H-1)/(M-1)) * y) for the four color channels.
+     * The map is assumed to be bilinearly interpolated between the sample points.</p>
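+     * <p>A sketch of bilinearly sampling the map for one channel at a sensor
+     * pixel, assuming the four channels have already been deinterleaved into a
+     * per-channel N x M array and that W, H, N, M are all greater than 1:</p>
+     * <pre><code>// map: N x M gains for one channel; (px, py): pixel in the W x H
+     * // active array. Samples lie at (((W-1)/(N-1))*x, ((H-1)/(M-1))*y).
+     * float sampleShadingMap(const float* map, int n, int m,
+     *                        int w, int h, int px, int py) {
+     *     float fx = (float)px * (n - 1) / (w - 1);  // map-space coordinates
+     *     float fy = (float)py * (m - 1) / (h - 1);
+     *     int x0 = (int)fx, y0 = (int)fy;
+     *     int x1 = x0 + 1 &lt; n ? x0 + 1 : x0;
+     *     int y1 = y0 + 1 &lt; m ? y0 + 1 : y0;
+     *     float tx = fx - x0, ty = fy - y0;
+     *     float top = map[y0 * n + x0] * (1 - tx) + map[y0 * n + x1] * tx;
+     *     float bot = map[y1 * n + x0] * (1 - tx) + map[y1 * n + x1] * tx;
+     *     return top * (1 - ty) + bot * ty;  // bilinear blend
+     * }
+     * </code></pre>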
+     * <p>The channel order is [R, Geven, Godd, B], where Geven is the green
+     * channel for the even rows of a Bayer pattern, and Godd is the odd rows.
+     * The shading map is stored in a fully interleaved format, and its size
+     * is provided in the camera static metadata by ACAMERA_LENS_INFO_SHADING_MAP_SIZE.</p>
+     * <p>The shading map will generally have on the order of 30-40 rows and columns,
+     * and will be smaller than 64x64.</p>
+     * <p>As an example, given a very small map defined as:</p>
+     * <pre><code>ACAMERA_LENS_INFO_SHADING_MAP_SIZE = [ 4, 3 ]
+     * ACAMERA_STATISTICS_LENS_SHADING_MAP =
+     * [ 1.3, 1.2, 1.15, 1.2,  1.2, 1.2, 1.15, 1.2,
+     *   1.1, 1.2, 1.2, 1.2,   1.3, 1.2, 1.3, 1.3,
+     *   1.2, 1.2, 1.25, 1.1,  1.1, 1.1, 1.1, 1.0,
+     *   1.0, 1.0, 1.0, 1.0,   1.2, 1.3, 1.25, 1.2,
+     *   1.3, 1.2, 1.2, 1.3,   1.2, 1.15, 1.1, 1.2,
+     *   1.2, 1.1, 1.0, 1.2,   1.3, 1.15, 1.2, 1.3 ]
+     * </code></pre>
+     * <p>The low-resolution scaling map images for each channel are
+     * (displayed using nearest-neighbor interpolation):</p>
+     * <p><img alt="Red lens shading map" src="../images/camera2/metadata/android.statistics.lensShadingMap/red_shading.png" />
+     * <img alt="Green (even rows) lens shading map" src="../images/camera2/metadata/android.statistics.lensShadingMap/green_e_shading.png" />
+     * <img alt="Green (odd rows) lens shading map" src="../images/camera2/metadata/android.statistics.lensShadingMap/green_o_shading.png" />
+     * <img alt="Blue lens shading map" src="../images/camera2/metadata/android.statistics.lensShadingMap/blue_shading.png" /></p>
+     * <p>As a visualization only, inverting the full-color map to recover an
+     * image of a gray wall (using bicubic interpolation for visual quality)
+     * as captured by the sensor gives:</p>
+     * <p><img alt="Image of a uniform white wall (inverse shading map)" src="../images/camera2/metadata/android.statistics.lensShadingMap/inv_shading.png" /></p>
+     * <p>Note that the RAW image data might be subject to lens shading
+     * correction not reported on this map. Query
+     * ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED to see if RAW image data has been subject
+     * to lens shading correction. If ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED
+     * is TRUE, the RAW image data is subject to partial or full lens shading
+     * correction. In the case that full lens shading correction is applied to RAW
+     * images, the gain factor map reported in this key will contain all 1.0 gains.
+     * In other words, the map reported in this key is the remaining lens shading
+     * that needs to be applied on the RAW image to get images without lens shading
+     * artifacts. See android.request.maxNumOutputRaw for a list of RAW image
+     * formats.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_MODE
+     * @see ACAMERA_LENS_INFO_SHADING_MAP_SIZE
+     * @see ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED
+     * @see ACAMERA_STATISTICS_LENS_SHADING_MAP
+     */
+    ACAMERA_STATISTICS_LENS_SHADING_MAP =                       // float[4*n*m]
+            ACAMERA_STATISTICS_START + 11,
+    /**
+     * <p>The camera device estimated scene illumination lighting
+     * frequency.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>Many light sources, such as most fluorescent lights, flicker at a rate
+     * that depends on the local utility power standards. This flicker must be
+     * accounted for by auto-exposure routines to avoid artifacts in captured images.
+     * The camera device uses this entry to tell the application what the scene
+     * illuminant frequency is.</p>
+     * <p>When manual exposure control is enabled
+     * (<code>ACAMERA_CONTROL_AE_MODE == OFF</code> or <code>ACAMERA_CONTROL_MODE ==
+     * OFF</code>), the ACAMERA_CONTROL_AE_ANTIBANDING_MODE doesn't perform
+     * antibanding, and the application can ensure it selects
+     * exposure times that do not cause banding issues by looking
+     * into this metadata field. See
+     * ACAMERA_CONTROL_AE_ANTIBANDING_MODE for more details.</p>
+     * <p>Reports NONE if there doesn't appear to be flickering illumination.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_ANTIBANDING_MODE
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_MODE
+     */
+    ACAMERA_STATISTICS_SCENE_FLICKER =                          // byte (enum)
+            ACAMERA_STATISTICS_START + 14,
+    /**
+     * <p>List of <code>(x, y)</code> coordinates of hot/defective pixels on the sensor.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>A coordinate <code>(x, y)</code> must lie between <code>(0, 0)</code> and
+     * <code>(width - 1, height - 1)</code> (inclusive), which are the top-left and
+     * bottom-right of the pixel array, respectively. The width and
+     * height dimensions are given in ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE.
+     * This may include hot pixels that lie outside of the active array
+     * bounds given by ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
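+     * <p>A minimal sketch of walking the returned coordinate pairs (the helper
+     * name is illustrative):</p>
+     * <pre><code>// data: 'count' int32 values laid out as x0, y0, x1, y1, ...
+     * void visitHotPixels(const int32_t* data, size_t count) {
+     *     for (size_t i = 0; i + 1 &lt; count; i += 2) {
+     *         int32_t x = data[i];
+     *         int32_t y = data[i + 1];
+     *         // (x, y) is a hot/defective pixel in pixel-array coordinates
+     *         (void)x; (void)y;
+     *     }
+     * }
+     * </code></pre>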
+     *
+     * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE
+     */
+    ACAMERA_STATISTICS_HOT_PIXEL_MAP =                          // int32[2*n]
+            ACAMERA_STATISTICS_START + 15,
+    /**
+     * <p>Whether the camera device will output the lens
+     * shading map in output result metadata.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>When set to ON,
+     * ACAMERA_STATISTICS_LENS_SHADING_MAP will be provided in
+     * the output result metadata.</p>
+     * <p>ON is always supported on devices with the RAW capability.</p>
+     *
+     * @see ACAMERA_STATISTICS_LENS_SHADING_MAP
+     */
+    ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE =                  // byte (enum)
+            ACAMERA_STATISTICS_START + 16,
+    ACAMERA_STATISTICS_END,
+
+    /**
+     * <p>List of face detection modes for ACAMERA_STATISTICS_FACE_DETECT_MODE that are
+     * supported by this camera device.</p>
+     *
+     * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>OFF is always supported.</p>
+     */
+    ACAMERA_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES =       // byte[n]
+            ACAMERA_STATISTICS_INFO_START,
+    /**
+     * <p>The maximum number of simultaneously detectable
+     * faces.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>None</p>
+     */
+    ACAMERA_STATISTICS_INFO_MAX_FACE_COUNT =                    // int32
+            ACAMERA_STATISTICS_INFO_START + 2,
+    /**
+     * <p>List of hot pixel map output modes for ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE that are
+     * supported by this camera device.</p>
+     *
+     * @see ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If no hot pixel map output is available for this camera device, this will contain
+     * only <code>false</code>.</p>
+     * <p>ON is always supported on devices with the RAW capability.</p>
+     */
+    ACAMERA_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES =     // byte[n]
+            ACAMERA_STATISTICS_INFO_START + 6,
+    /**
+     * <p>List of lens shading map output modes for ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE that
+     * are supported by this camera device.</p>
+     *
+     * @see ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If no lens shading map output is available for this camera device, this key will
+     * contain only OFF.</p>
+     * <p>ON is always supported on devices with the RAW capability.
+     * LEGACY mode devices will always only support OFF.</p>
+     */
+    ACAMERA_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES =  // byte[n]
+            ACAMERA_STATISTICS_INFO_START + 7,
+    ACAMERA_STATISTICS_INFO_END,
+
+    /**
+     * <p>Tonemapping / contrast / gamma curve for the blue
+     * channel, to use when ACAMERA_TONEMAP_MODE is
+     * CONTRAST_CURVE.</p>
+     *
+     * @see ACAMERA_TONEMAP_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>See ACAMERA_TONEMAP_CURVE_RED for more details.</p>
+     *
+     * @see ACAMERA_TONEMAP_CURVE_RED
+     */
+    ACAMERA_TONEMAP_CURVE_BLUE =                                // float[n*2]
+            ACAMERA_TONEMAP_START,
+    /**
+     * <p>Tonemapping / contrast / gamma curve for the green
+     * channel, to use when ACAMERA_TONEMAP_MODE is
+     * CONTRAST_CURVE.</p>
+     *
+     * @see ACAMERA_TONEMAP_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>See ACAMERA_TONEMAP_CURVE_RED for more details.</p>
+     *
+     * @see ACAMERA_TONEMAP_CURVE_RED
+     */
+    ACAMERA_TONEMAP_CURVE_GREEN =                               // float[n*2]
+            ACAMERA_TONEMAP_START + 1,
+    /**
+     * <p>Tonemapping / contrast / gamma curve for the red
+     * channel, to use when ACAMERA_TONEMAP_MODE is
+     * CONTRAST_CURVE.</p>
+     *
+     * @see ACAMERA_TONEMAP_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Each channel's curve is defined by an array of control points:</p>
+     * <pre><code>ACAMERA_TONEMAP_CURVE_RED =
+     *   [ P0in, P0out, P1in, P1out, P2in, P2out, P3in, P3out, ..., PNin, PNout ]
+     * 2 &lt;= N &lt;= ACAMERA_TONEMAP_MAX_CURVE_POINTS</code></pre>
+     * <p>These are sorted in order of increasing <code>Pin</code>; it is
+     * required that input values 0.0 and 1.0 are included in the list to
+     * define a complete mapping. For input values between control points,
+     * the camera device must linearly interpolate between the control
+     * points.</p>
+     * <p>Each curve can have an independent number of points, and the number
+     * of points can be less than the maximum (that is, the request doesn't
+     * always have to provide a curve with as many points as
+     * ACAMERA_TONEMAP_MAX_CURVE_POINTS).</p>
+     * <p>A few examples, and their corresponding graphical mappings; these
+     * only specify the red channel and the precision is limited to 4
+     * digits, for conciseness.</p>
+     * <p>Linear mapping:</p>
+     * <pre><code>ACAMERA_TONEMAP_CURVE_RED = [ 0, 0, 1.0, 1.0 ]
+     * </code></pre>
+     * <p><img alt="Linear mapping curve" src="../images/camera2/metadata/android.tonemap.curveRed/linear_tonemap.png" /></p>
+     * <p>Invert mapping:</p>
+     * <pre><code>ACAMERA_TONEMAP_CURVE_RED = [ 0, 1.0, 1.0, 0 ]
+     * </code></pre>
+     * <p><img alt="Inverting mapping curve" src="../images/camera2/metadata/android.tonemap.curveRed/inverse_tonemap.png" /></p>
+     * <p>Gamma 1/2.2 mapping, with 16 control points:</p>
+     * <pre><code>ACAMERA_TONEMAP_CURVE_RED = [
+     *   0.0000, 0.0000, 0.0667, 0.2920, 0.1333, 0.4002, 0.2000, 0.4812,
+     *   0.2667, 0.5484, 0.3333, 0.6069, 0.4000, 0.6594, 0.4667, 0.7072,
+     *   0.5333, 0.7515, 0.6000, 0.7928, 0.6667, 0.8317, 0.7333, 0.8685,
+     *   0.8000, 0.9035, 0.8667, 0.9370, 0.9333, 0.9691, 1.0000, 1.0000 ]
+     * </code></pre>
+     * <p><img alt="Gamma = 1/2.2 tonemapping curve" src="../images/camera2/metadata/android.tonemap.curveRed/gamma_tonemap.png" /></p>
+     * <p>Standard sRGB gamma mapping, per IEC 61966-2-1:1999, with 16 control points:</p>
+     * <pre><code>ACAMERA_TONEMAP_CURVE_RED = [
+     *   0.0000, 0.0000, 0.0667, 0.2864, 0.1333, 0.4007, 0.2000, 0.4845,
+     *   0.2667, 0.5532, 0.3333, 0.6125, 0.4000, 0.6652, 0.4667, 0.7130,
+     *   0.5333, 0.7569, 0.6000, 0.7977, 0.6667, 0.8360, 0.7333, 0.8721,
+     *   0.8000, 0.9063, 0.8667, 0.9389, 0.9333, 0.9701, 1.0000, 1.0000 ]
+     * </code></pre>
+     * <p><img alt="sRGB tonemapping curve" src="../images/camera2/metadata/android.tonemap.curveRed/srgb_tonemap.png" /></p>
+     *
+     * @see ACAMERA_TONEMAP_CURVE_RED
+     * @see ACAMERA_TONEMAP_MAX_CURVE_POINTS
+     */
+    ACAMERA_TONEMAP_CURVE_RED =                                 // float[n*2]
+            ACAMERA_TONEMAP_START + 2,
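+    /*
+     * Illustrative sketch (not part of the generated tag list): applying an
+     * identity (linear) curve to all three channels in CONTRAST_CURVE mode.
+     * Assumes CONTRAST_CURVE is the enum value 0 defined for
+     * ACAMERA_TONEMAP_MODE.
+     *
+     *   float linear[] = { 0.0f, 0.0f, 1.0f, 1.0f };
+     *   uint8_t mode = 0; // assumed ACAMERA_TONEMAP_MODE_CONTRAST_CURVE
+     *   ACaptureRequest_setEntry_u8(request, ACAMERA_TONEMAP_MODE, 1, &mode);
+     *   ACaptureRequest_setEntry_float(request, ACAMERA_TONEMAP_CURVE_RED,
+     *           4, linear);
+     *   ACaptureRequest_setEntry_float(request, ACAMERA_TONEMAP_CURVE_GREEN,
+     *           4, linear);
+     *   ACaptureRequest_setEntry_float(request, ACAMERA_TONEMAP_CURVE_BLUE,
+     *           4, linear);
+     */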
+    /**
+     * <p>High-level global contrast/gamma/tonemapping control.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>When switching to an application-defined contrast curve by setting
+     * ACAMERA_TONEMAP_MODE to CONTRAST_CURVE, the curve is defined
+     * per-channel with a set of <code>(in, out)</code> points that specify the
+     * mapping from input high-bit-depth pixel value to the output
+     * low-bit-depth value.  Since the actual pixel ranges of both input
+     * and output may change depending on the camera pipeline, the values
+     * are specified by normalized floating-point numbers.</p>
+     * <p>More-complex color mapping operations such as 3D color look-up
+     * tables, selective chroma enhancement, or other non-linear color
+     * transforms will be disabled when ACAMERA_TONEMAP_MODE is
+     * CONTRAST_CURVE.</p>
+     * <p>When using either FAST or HIGH_QUALITY, the camera device will
+     * emit its own tonemap curve in android.tonemap.curve.
+     * These values are always available, and as close as possible to the
+     * actually used nonlinear/nonglobal transforms.</p>
+     * <p>If a CONTRAST_CURVE request is sent with the curve that the camera
+     * device provided for FAST or HIGH_QUALITY, the image's tonemap will be
+     * roughly the same.</p>
+     *
+     * @see ACAMERA_TONEMAP_MODE
+     */
+    ACAMERA_TONEMAP_MODE =                                      // byte (enum)
+            ACAMERA_TONEMAP_START + 3,
+    /**
+     * <p>Maximum number of supported points in the
+     * tonemap curve that can be used for android.tonemap.curve.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If the actual number of points provided by the application (in
+     * ACAMERA_TONEMAP_CURVE_*) is less than this maximum, the camera device will
+     * resample the curve to its internal representation, using linear
+     * interpolation.</p>
+     * <p>The output curves in the result metadata may have a different number
+     * of points than the input curves, and will represent the actual
+     * hardware curves used as closely as possible when linearly interpolated.</p>
+     */
+    ACAMERA_TONEMAP_MAX_CURVE_POINTS =                          // int32
+            ACAMERA_TONEMAP_START + 4,
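+    /*
+     * Illustrative sketch (not part of the generated tag list): querying the
+     * maximum curve size from the static characteristics before building a
+     * curve. Assumes a valid ACameraMetadata* chars from
+     * ACameraManager_getCameraCharacteristics.
+     *
+     *   ACameraMetadata_const_entry maxPoints;
+     *   if (ACameraMetadata_getConstEntry(chars,
+     *           ACAMERA_TONEMAP_MAX_CURVE_POINTS, &maxPoints) == ACAMERA_OK) {
+     *       int32_t n = maxPoints.data.i32[0]; // curve may use up to n points
+     *   }
+     */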
+    /**
+     * <p>List of tonemapping modes for ACAMERA_TONEMAP_MODE that are supported by this camera
+     * device.</p>
+     *
+     * @see ACAMERA_TONEMAP_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>Camera devices that support the MANUAL_POST_PROCESSING capability will always list
+     * at least one of the following mode combinations:</p>
+     * <ul>
+     * <li>CONTRAST_CURVE, FAST and HIGH_QUALITY</li>
+     * <li>GAMMA_VALUE, PRESET_CURVE, FAST and HIGH_QUALITY</li>
+     * </ul>
+     * <p>This includes all FULL level devices.</p>
+     */
+    ACAMERA_TONEMAP_AVAILABLE_TONE_MAP_MODES =                  // byte[n]
+            ACAMERA_TONEMAP_START + 5,
+    /**
+     * <p>Tonemapping curve to use when ACAMERA_TONEMAP_MODE is
+     * GAMMA_VALUE.</p>
+     *
+     * @see ACAMERA_TONEMAP_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>The tonemap curve will be defined by the following formula:</p>
+     * <pre><code>OUT = pow(IN, 1.0 / gamma)</code></pre>
+     * <p>where IN and OUT are the input and output pixel values scaled to the
+     * range [0.0, 1.0], pow is the power function, and gamma is the gamma value
+     * specified by this key.</p>
+     * <p>The same curve will be applied to all color channels. The camera device
+     * may clip the input gamma value to its supported range. The actually applied
+     * value will be returned in the capture result.</p>
+     * <p>The valid range of the gamma value varies across devices, but values
+     * within [1.0, 5.0] are guaranteed not to be clipped.</p>
+     */
+    ACAMERA_TONEMAP_GAMMA =                                     // float
+            ACAMERA_TONEMAP_START + 6,
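+    /*
+     * Illustrative sketch (not part of the generated tag list): selecting a
+     * gamma 2.2 tonemap. Assumes GAMMA_VALUE is the enum value defined for
+     * ACAMERA_TONEMAP_MODE (shown here hypothetically as 3).
+     *
+     *   uint8_t mode = 3;   // assumed ACAMERA_TONEMAP_MODE_GAMMA_VALUE
+     *   float gamma = 2.2f; // within [1.0, 5.0], so never clipped
+     *   ACaptureRequest_setEntry_u8(request, ACAMERA_TONEMAP_MODE, 1, &mode);
+     *   ACaptureRequest_setEntry_float(request, ACAMERA_TONEMAP_GAMMA,
+     *           1, &gamma);
+     */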
+    /**
+     * <p>Tonemapping curve to use when ACAMERA_TONEMAP_MODE is
+     * PRESET_CURVE.</p>
+     *
+     * @see ACAMERA_TONEMAP_MODE
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>The tonemap curve will be defined by the specified standard.</p>
+     * <p>sRGB (approximated by 16 control points):</p>
+     * <p><img alt="sRGB tonemapping curve" src="../images/camera2/metadata/android.tonemap.curveRed/srgb_tonemap.png" /></p>
+     * <p>Rec. 709 (approximated by 16 control points):</p>
+     * <p><img alt="Rec. 709 tonemapping curve" src="../images/camera2/metadata/android.tonemap.curveRed/rec709_tonemap.png" /></p>
+     * <p>Note that the figures above show a 16-control-point approximation of the
+     * preset curves. Camera devices may apply a different approximation to the curve.</p>
+     */
+    ACAMERA_TONEMAP_PRESET_CURVE =                              // byte (enum)
+            ACAMERA_TONEMAP_START + 7,
+    ACAMERA_TONEMAP_END,
+
+    /**
+     * <p>Generally classifies the overall set of the camera device functionality.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>The supported hardware level is a high-level description of the camera device's
+     * capabilities, summarizing several capabilities into one field.  Each level adds additional
+     * features to the previous one, and is always a strict superset of the previous level.
+     * The ordering is <code>LEGACY &lt; LIMITED &lt; FULL &lt; LEVEL_3</code>.</p>
+     * <p>Starting from <code>LEVEL_3</code>, the level enumerations are guaranteed to be in increasing
+     * numerical value as well. To check if a given device is at least at a given hardware level,
+     * the following code snippet can be used:</p>
+     * <pre><code>// Returns true if the device supports the required hardware level, or better.
+     * boolean isHardwareLevelSupported(CameraCharacteristics c, int requiredLevel) {
+     *     int deviceLevel = c.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL);
+     *     if (deviceLevel == CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) {
+     *         return requiredLevel == deviceLevel;
+     *     }
+     *     // deviceLevel is not LEGACY, can use numerical sort
+     *     return requiredLevel &lt;= deviceLevel;
+     * }
+     * </code></pre>
+     * <p>At a high level, the levels are:</p>
+     * <ul>
+     * <li><code>LEGACY</code> devices operate in a backwards-compatibility mode for older
+     *   Android devices, and have very limited capabilities.</li>
+     * <li><code>LIMITED</code> devices represent the
+     *   baseline feature set, and may also include additional capabilities that are
+     *   subsets of <code>FULL</code>.</li>
+     * <li><code>FULL</code> devices additionally support per-frame manual control of sensor, flash, lens and
+     *   post-processing settings, and image capture at a high rate.</li>
+     * <li><code>LEVEL_3</code> devices additionally support YUV reprocessing and RAW image capture, along
+     *   with additional output stream configurations.</li>
+     * </ul>
+     * <p>See the individual level enums for full descriptions of the supported capabilities.  The
+     * ACAMERA_REQUEST_AVAILABLE_CAPABILITIES entry describes the device's capabilities at a
+     * finer-grain level, if needed. In addition, many controls have their available settings or
+     * ranges defined in individual metadata tag entries in this document.</p>
+     * <p>Some features are not part of any particular hardware level or capability and must be
+     * queried separately. These include:</p>
+     * <ul>
+     * <li>Calibrated timestamps (ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE <code>==</code> REALTIME)</li>
+     * <li>Precision lens control (ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION <code>==</code> CALIBRATED)</li>
+     * <li>Face detection (ACAMERA_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES)</li>
+     * <li>Optical or electrical image stabilization
+     *   (ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+     *    ACAMERA_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES)</li>
+     * </ul>
+     *
+     * @see ACAMERA_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES
+     * @see ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION
+     * @see ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     * @see ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE
+     * @see ACAMERA_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES
+     */
+    ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL =                     // byte (enum)
+            ACAMERA_INFO_START,
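+    /*
+     * Illustrative sketch (not part of the generated tag list): the same
+     * hardware-level check as the Java snippet above, written against the
+     * NDK. Assumes a valid ACameraMetadata* chars and that LEGACY is the
+     * enum value 2 for this tag, with the other levels numerically ordered.
+     *
+     *   bool isHardwareLevelSupported(const ACameraMetadata* chars,
+     *           uint8_t requiredLevel) {
+     *       ACameraMetadata_const_entry entry;
+     *       if (ACameraMetadata_getConstEntry(chars,
+     *               ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL,
+     *               &entry) != ACAMERA_OK) {
+     *           return false;
+     *       }
+     *       uint8_t deviceLevel = entry.data.u8[0];
+     *       if (deviceLevel == 2) { // assumed ..._LEVEL_LEGACY
+     *           return requiredLevel == deviceLevel;
+     *       }
+     *       // deviceLevel is not LEGACY, can use numerical sort
+     *       return requiredLevel <= deviceLevel;
+     *   }
+     */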
+    ACAMERA_INFO_END,
+
+    /**
+     * <p>Whether black-level compensation is locked
+     * to its current values, or is free to vary.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     *   <li>ACaptureRequest</li>
+     * </ul>
+     *
+     * <p>Whether the black level offset was locked for this frame. It should be
+     * ON if ACAMERA_BLACK_LEVEL_LOCK was ON in the capture request, unless
+     * a change in other capture settings forced the camera device to
+     * perform a black level reset.</p>
+     *
+     * @see ACAMERA_BLACK_LEVEL_LOCK
+     */
+    ACAMERA_BLACK_LEVEL_LOCK =                                  // byte (enum)
+            ACAMERA_BLACK_LEVEL_START,
+    ACAMERA_BLACK_LEVEL_END,
+
+    /**
+     * <p>The frame number corresponding to the last request
+     * with which the output result (metadata + buffers) has been fully
+     * synchronized.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul>
+     *
+     * <p>When a request is submitted to the camera device, there is usually a
+     * delay of several frames before the controls get applied. A camera
+     * device may either choose to account for this delay by implementing a
+     * pipeline and carefully submit well-timed atomic control updates, or
+     * it may start streaming control changes that span over several frame
+     * boundaries.</p>
+     * <p>In the latter case, whenever a request's settings change relative to
+     * the previous submitted request, the full set of changes may take
+     * multiple frame durations to fully take effect. Some settings may
+     * take effect sooner (in less frame durations) than others.</p>
+     * <p>While a set of control changes are being propagated, this value
+     * will be CONVERGING.</p>
+     * <p>Once it is fully known that a set of control changes have been
+     * finished propagating, and the resulting updated control settings
+     * have been read back by the camera device, this value will be set
+     * to a non-negative frame number (corresponding to the request to
+     * which the results have synchronized).</p>
+     * <p>Older camera device implementations may not have a way to detect
+     * when all camera controls have been applied, and will always set this
+     * value to UNKNOWN.</p>
+     * <p>FULL capability devices will always have this value set to the
+     * frame number of the request corresponding to this result.</p>
+     * <p><em>Further details</em>:</p>
+     * <ul>
+     * <li>Whenever a request differs from the last request, any future
+     * results not yet returned may have this value set to CONVERGING (this
+     * could include any in-progress captures not yet returned by the camera
+     * device, for more details see pipeline considerations below).</li>
+     * <li>Submitting a series of multiple requests that differ from the
+     * previous request (e.g. r1, r2, r3 s.t. r1 != r2 != r3)
+     * moves the new synchronization frame to the last non-repeating
+     * request (using the smallest frame number from the contiguous list of
+     * repeating requests).</li>
+     * <li>Submitting the same request repeatedly will not change this value
+     * to CONVERGING, if it was already a non-negative value.</li>
+     * <li>When this value changes to non-negative, that means that all of the
+     * metadata controls from the request have been applied, all of the
+     * metadata controls from the camera device have been read to the
+     * updated values (into the result), and all of the graphics buffers
+     * corresponding to this result are also synchronized to the request.</li>
+     * </ul>
+     * <p><em>Pipeline considerations</em>:</p>
+     * <p>Submitting a request with updated controls relative to the previously
+     * submitted requests may also invalidate the synchronization state
+     * of all the results corresponding to currently in-flight requests.</p>
+     * <p>In other words, results for this current request and up to
+     * ACAMERA_REQUEST_PIPELINE_MAX_DEPTH prior requests may have their
+     * ACAMERA_SYNC_FRAME_NUMBER change to CONVERGING.</p>
+     *
+     * @see ACAMERA_REQUEST_PIPELINE_MAX_DEPTH
+     * @see ACAMERA_SYNC_FRAME_NUMBER
+     */
+    ACAMERA_SYNC_FRAME_NUMBER =                                 // int64 (enum)
+            ACAMERA_SYNC_START,
+    /**
+     * <p>The maximum number of frames that can occur after a request
+     * (different from the previous one) has been submitted, and before the
+     * result's state becomes synchronized.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This defines the maximum distance (in number of metadata results),
+     * between the frame number of the request that has new controls to apply
+     * and the frame number of the result that has all the controls applied.</p>
+     * <p>In other words, this acts as an upper bound on how many frames
+     * must occur before the camera device knows for a fact that the newly
+     * submitted camera settings have been applied in outgoing frames.</p>
+     */
+    ACAMERA_SYNC_MAX_LATENCY =                                  // int32 (enum)
+            ACAMERA_SYNC_START + 1,
+    ACAMERA_SYNC_END,
+
+    /**
+     * <p>The available depth dataspace stream
+     * configurations that this camera device supports
+     * (i.e. format, width, height, output/input stream).</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>These are output stream configurations for use with
+     * dataSpace HAL_DATASPACE_DEPTH. The configurations are
+     * listed as <code>(format, width, height, input?)</code> tuples.</p>
+     * <p>Only devices that support depth output for at least
+     * the HAL_PIXEL_FORMAT_Y16 dense depth map may include
+     * this entry.</p>
+     * <p>A device that also supports the HAL_PIXEL_FORMAT_BLOB
+     * sparse depth point cloud must report a single entry for
+     * the format in this list as <code>(HAL_PIXEL_FORMAT_BLOB,
+     * android.depth.maxDepthSamples, 1, OUTPUT)</code> in addition to
+     * the entries for HAL_PIXEL_FORMAT_Y16.</p>
+     */
+    ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS =       // int32[n*4] (enum)
+            ACAMERA_DEPTH_START + 1,
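+    /*
+     * Illustrative sketch (not part of the generated tag list): walking the
+     * (format, width, height, input?) tuples of this int32[n*4] entry.
+     * Assumes a valid ACameraMetadata* chars.
+     *
+     *   ACameraMetadata_const_entry entry;
+     *   if (ACameraMetadata_getConstEntry(chars,
+     *           ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS,
+     *           &entry) == ACAMERA_OK) {
+     *       for (uint32_t i = 0; i + 3 < entry.count; i += 4) {
+     *           int32_t format  = entry.data.i32[i + 0];
+     *           int32_t width   = entry.data.i32[i + 1];
+     *           int32_t height  = entry.data.i32[i + 2];
+     *           int32_t isInput = entry.data.i32[i + 3];
+     *       }
+     *   }
+     */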
+    /**
+     * <p>This lists the minimum frame duration for each
+     * format/size combination for depth output formats.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>This should correspond to the frame duration when only that
+     * stream is active, with all processing (typically in android.*.mode)
+     * set to either OFF or FAST.</p>
+     * <p>When multiple streams are used in a request, the minimum frame
+     * duration will be max(individual stream min durations).</p>
+     * <p>The minimum frame duration of a stream (of a particular format, size)
+     * is the same regardless of whether the stream is input or output.</p>
+     * <p>See ACAMERA_SENSOR_FRAME_DURATION and
+     * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS for more details about
+     * calculating the max frame rate.</p>
+     *
+     * @see ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     */
+    ACAMERA_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS =         // int64[4*n]
+            ACAMERA_DEPTH_START + 2,
+    /**
+     * <p>This lists the maximum stall duration for each
+     * output format/size combination for depth streams.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>A stall duration is how much extra time would get added
+     * to the normal minimum frame duration for a repeating request
+     * that has streams with non-zero stall.</p>
+     * <p>This functions similarly to
+     * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS for depth
+     * streams.</p>
+     * <p>All depth output stream formats may have a nonzero stall
+     * duration.</p>
+     *
+     * @see ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS
+     */
+    ACAMERA_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS =             // int64[4*n]
+            ACAMERA_DEPTH_START + 3,
+    /**
+     * <p>Indicates whether a capture request may target both a
+     * DEPTH16 / DEPTH_POINT_CLOUD output, and normal color outputs (such as
+     * YUV_420_888, JPEG, or RAW) simultaneously.</p>
+     *
+     * <p>This tag may appear in:</p>
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul>
+     *
+     * <p>If TRUE, including both depth and color outputs in a single
+     * capture request is not supported. An application must interleave color
+     * and depth requests.  If FALSE, a single request can target both types
+     * of output.</p>
+     * <p>Typically, this restriction exists on camera devices that
+     * need to emit a specific pattern or wavelength of light to
+     * measure depth values, which causes the color image to be
+     * corrupted during depth measurement.</p>
+     */
+    ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE =                          // byte (enum)
+            ACAMERA_DEPTH_START + 4,
+    ACAMERA_DEPTH_END,
+
+} acamera_metadata_tag_t;
+
+/**
+ * Enumeration definitions for the various entries that need them
+ */
+
+// ACAMERA_COLOR_CORRECTION_MODE
+typedef enum acamera_metadata_enum_acamera_color_correction_mode {
+    /**
+     * <p>Use the ACAMERA_COLOR_CORRECTION_TRANSFORM matrix
+     * and ACAMERA_COLOR_CORRECTION_GAINS to do color conversion.</p>
+     * <p>All advanced white balance adjustments (not specified
+     * by our white balance pipeline) must be disabled.</p>
+     * <p>If AWB is enabled with <code>ACAMERA_CONTROL_AWB_MODE != OFF</code>, then
+     * TRANSFORM_MATRIX is ignored. The camera device will override
+     * this value to either FAST or HIGH_QUALITY.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * @see ACAMERA_CONTROL_AWB_MODE
+     */
+    ACAMERA_COLOR_CORRECTION_MODE_TRANSFORM_MATRIX                   = 0,
+
+    /**
+     * <p>Color correction processing must not slow down
+     * capture rate relative to sensor raw output.</p>
+     * <p>Advanced white balance adjustments above and beyond
+     * the specified white balance pipeline may be applied.</p>
+     * <p>If AWB is enabled with <code>ACAMERA_CONTROL_AWB_MODE != OFF</code>, then
+     * the camera device uses the last frame's AWB values
+     * (or defaults if AWB has never been run).</p>
+     *
+     * @see ACAMERA_CONTROL_AWB_MODE
+     */
+    ACAMERA_COLOR_CORRECTION_MODE_FAST                               = 1,
+
+    /**
+     * <p>Color correction processing operates at improved
+     * quality but the capture rate might be reduced (relative to sensor
+     * raw output rate)</p>
+     * <p>Advanced white balance adjustments above and beyond
+     * the specified white balance pipeline may be applied.</p>
+     * <p>If AWB is enabled with <code>ACAMERA_CONTROL_AWB_MODE != OFF</code>, then
+     * the camera device uses the last frame's AWB values
+     * (or defaults if AWB has never been run).</p>
+     *
+     * @see ACAMERA_CONTROL_AWB_MODE
+     */
+    ACAMERA_COLOR_CORRECTION_MODE_HIGH_QUALITY                       = 2,
+
+} acamera_metadata_enum_android_color_correction_mode_t;
+
+// ACAMERA_COLOR_CORRECTION_ABERRATION_MODE
+typedef enum acamera_metadata_enum_acamera_color_correction_aberration_mode {
+    /**
+     * <p>No aberration correction is applied.</p>
+     */
+    ACAMERA_COLOR_CORRECTION_ABERRATION_MODE_OFF                     = 0,
+
+    /**
+     * <p>Aberration correction will not slow down capture rate
+     * relative to sensor raw output.</p>
+     */
+    ACAMERA_COLOR_CORRECTION_ABERRATION_MODE_FAST                    = 1,
+
+    /**
+     * <p>Aberration correction operates at improved quality but the capture rate might be
+     * reduced (relative to sensor raw output rate)</p>
+     */
+    ACAMERA_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY            = 2,
+
+} acamera_metadata_enum_android_color_correction_aberration_mode_t;
+
+
+// ACAMERA_CONTROL_AE_ANTIBANDING_MODE
+typedef enum acamera_metadata_enum_acamera_control_ae_antibanding_mode {
+    /**
+     * <p>The camera device will not adjust exposure duration to
+     * avoid banding problems.</p>
+     */
+    ACAMERA_CONTROL_AE_ANTIBANDING_MODE_OFF                          = 0,
+
+    /**
+     * <p>The camera device will adjust exposure duration to
+     * avoid banding problems with 50Hz illumination sources.</p>
+     */
+    ACAMERA_CONTROL_AE_ANTIBANDING_MODE_50HZ                         = 1,
+
+    /**
+     * <p>The camera device will adjust exposure duration to
+     * avoid banding problems with 60Hz illumination
+     * sources.</p>
+     */
+    ACAMERA_CONTROL_AE_ANTIBANDING_MODE_60HZ                         = 2,
+
+    /**
+     * <p>The camera device will automatically adapt its
+     * antibanding routine to the current illumination
+     * condition. This is the default mode if AUTO is
+     * available on the given camera device.</p>
+     */
+    ACAMERA_CONTROL_AE_ANTIBANDING_MODE_AUTO                         = 3,
+
+} acamera_metadata_enum_android_control_ae_antibanding_mode_t;
+
+// ACAMERA_CONTROL_AE_LOCK
+typedef enum acamera_metadata_enum_acamera_control_ae_lock {
+    /**
+     * <p>Auto-exposure lock is disabled; the AE algorithm
+     * is free to update its parameters.</p>
+     */
+    ACAMERA_CONTROL_AE_LOCK_OFF                                      = 0,
+
+    /**
+     * <p>Auto-exposure lock is enabled; the AE algorithm
+     * must not update the exposure and sensitivity parameters
+     * while the lock is active.</p>
+     * <p>ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION setting changes
+     * will still take effect while auto-exposure is locked.</p>
+     * <p>Some rare LEGACY devices may not support
+     * this, in which case the value will always be overridden to OFF.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION
+     */
+    ACAMERA_CONTROL_AE_LOCK_ON                                       = 1,
+
+} acamera_metadata_enum_android_control_ae_lock_t;
+
+// ACAMERA_CONTROL_AE_MODE
+typedef enum acamera_metadata_enum_acamera_control_ae_mode {
+    /**
+     * <p>The camera device's autoexposure routine is disabled.</p>
+     * <p>The application-selected ACAMERA_SENSOR_EXPOSURE_TIME,
+     * ACAMERA_SENSOR_SENSITIVITY and
+     * ACAMERA_SENSOR_FRAME_DURATION are used by the camera
+     * device, along with ACAMERA_FLASH_* fields, if there's
+     * a flash unit for this camera device.</p>
+     * <p>Note that auto-white balance (AWB) and auto-focus (AF)
+     * behavior is device dependent when AE is in OFF mode.
+     * To have consistent behavior across different devices,
+     * it is recommended to either set AWB and AF to OFF mode
+     * or lock AWB and AF before setting AE to OFF.
+     * See ACAMERA_CONTROL_AWB_MODE, ACAMERA_CONTROL_AF_MODE,
+     * ACAMERA_CONTROL_AWB_LOCK, and ACAMERA_CONTROL_AF_TRIGGER
+     * for more details.</p>
+     * <p>LEGACY devices do not support the OFF mode and will
+     * override attempts to use this value to ON.</p>
+     *
+     * @see ACAMERA_CONTROL_AF_MODE
+     * @see ACAMERA_CONTROL_AF_TRIGGER
+     * @see ACAMERA_CONTROL_AWB_LOCK
+     * @see ACAMERA_CONTROL_AWB_MODE
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_CONTROL_AE_MODE_OFF                                      = 0,
+
+    /**
+     * <p>The camera device's autoexposure routine is active,
+     * with no flash control.</p>
+     * <p>The application's values for
+     * ACAMERA_SENSOR_EXPOSURE_TIME,
+     * ACAMERA_SENSOR_SENSITIVITY, and
+     * ACAMERA_SENSOR_FRAME_DURATION are ignored. The
+     * application has control over the various
+     * ACAMERA_FLASH_* fields.</p>
+     *
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_CONTROL_AE_MODE_ON                                       = 1,
+
+    /**
+     * <p>Like ON, except that the camera device also controls
+     * the camera's flash unit, firing it in low-light
+     * conditions.</p>
+     * <p>The flash may be fired during a precapture sequence
+     * (triggered by ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER) and
+     * may be fired for captures for which the
+     * ACAMERA_CONTROL_CAPTURE_INTENT field is set to
+     * STILL_CAPTURE.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * @see ACAMERA_CONTROL_CAPTURE_INTENT
+     */
+    ACAMERA_CONTROL_AE_MODE_ON_AUTO_FLASH                            = 2,
+
+    /**
+     * <p>Like ON, except that the camera device also controls
+     * the camera's flash unit, always firing it for still
+     * captures.</p>
+     * <p>The flash may be fired during a precapture sequence
+     * (triggered by ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER) and
+     * will always be fired for captures for which the
+     * ACAMERA_CONTROL_CAPTURE_INTENT field is set to
+     * STILL_CAPTURE.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * @see ACAMERA_CONTROL_CAPTURE_INTENT
+     */
+    ACAMERA_CONTROL_AE_MODE_ON_ALWAYS_FLASH                          = 3,
+
+    /**
+     * <p>Like ON_AUTO_FLASH, but with automatic red eye
+     * reduction.</p>
+     * <p>If deemed necessary by the camera device, a red eye
+     * reduction flash will fire during the precapture
+     * sequence.</p>
+     */
+    ACAMERA_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE                     = 4,
+
+} acamera_metadata_enum_android_control_ae_mode_t;
+
+// ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+typedef enum acamera_metadata_enum_acamera_control_ae_precapture_trigger {
+    /**
+     * <p>The trigger is idle.</p>
+     */
+    ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE                       = 0,
+
+    /**
+     * <p>The precapture metering sequence will be started
+     * by the camera device.</p>
+     * <p>The exact effect of the precapture trigger depends on
+     * the current AE mode and state.</p>
+     */
+    ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER_START                      = 1,
+
+    /**
+     * <p>The camera device will cancel any currently active or completed
+     * precapture metering sequence, and the auto-exposure routine will return
+     * to its initial state.</p>
+     */
+    ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL                     = 2,
+
+} acamera_metadata_enum_android_control_ae_precapture_trigger_t;
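+
+/*
+ * Illustrative sketch (not part of the generated enums): firing the AE
+ * precapture sequence from a single (non-repeating) capture request before a
+ * still capture. Assumes ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is the
+ * corresponding request tag and that the session/request objects are valid.
+ *
+ *   uint8_t trigger = ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER_START;
+ *   ACaptureRequest_setEntry_u8(request,
+ *           ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER, 1, &trigger);
+ *   ACameraCaptureSession_capture(session, &callbacks, 1, &request, NULL);
+ */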
+
+// ACAMERA_CONTROL_AF_MODE
+typedef enum acamera_metadata_enum_acamera_control_af_mode {
+    /**
+     * <p>The auto-focus routine does not control the lens;
+     * ACAMERA_LENS_FOCUS_DISTANCE is controlled by the
+     * application.</p>
+     *
+     * @see ACAMERA_LENS_FOCUS_DISTANCE
+     */
+    ACAMERA_CONTROL_AF_MODE_OFF                                      = 0,
+
+    /**
+     * <p>Basic automatic focus mode.</p>
+     * <p>In this mode, the lens does not move unless
+     * the autofocus trigger action is called. When that trigger
+     * is activated, AF will transition to ACTIVE_SCAN, then to
+     * the outcome of the scan (FOCUSED or NOT_FOCUSED).</p>
+     * <p>Always supported if lens is not fixed focus.</p>
+     * <p>Use ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE to determine if lens
+     * is fixed-focus.</p>
+     * <p>Triggering AF_CANCEL resets the lens position to default,
+     * and sets the AF state to INACTIVE.</p>
+     *
+     * @see ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE
+     */
+    ACAMERA_CONTROL_AF_MODE_AUTO                                     = 1,
+
+    /**
+     * <p>Close-up focusing mode.</p>
+     * <p>In this mode, the lens does not move unless the
+     * autofocus trigger action is called. When that trigger is
+     * activated, AF will transition to ACTIVE_SCAN, then to
+     * the outcome of the scan (FOCUSED or NOT_FOCUSED). This
+     * mode is optimized for focusing on objects very close to
+     * the camera.</p>
+     * <p>Triggering cancel AF resets the lens position to
+     * default, and sets the AF state to INACTIVE.</p>
+     */
+    ACAMERA_CONTROL_AF_MODE_MACRO                                    = 2,
+
+    /**
+     * <p>In this mode, the AF algorithm modifies the lens
+     * position continually to attempt to provide a
+     * constantly-in-focus image stream.</p>
+     * <p>The focusing behavior should be suitable for good quality
+     * video recording; typically this means slower focus
+     * movement and no overshoots. When the AF trigger is not
+     * involved, the AF algorithm should start in INACTIVE state,
+     * and then transition into PASSIVE_SCAN and PASSIVE_FOCUSED
+     * states as appropriate. When the AF trigger is activated,
+     * the algorithm should immediately transition into
+     * AF_FOCUSED or AF_NOT_FOCUSED as appropriate, and lock the
+     * lens position until a cancel AF trigger is received.</p>
+     * <p>Once cancel is received, the algorithm should transition
+     * back to INACTIVE and resume passive scan. Note that this
+     * behavior is not identical to CONTINUOUS_PICTURE, since an
+     * ongoing PASSIVE_SCAN must immediately be
+     * canceled.</p>
+     */
+    ACAMERA_CONTROL_AF_MODE_CONTINUOUS_VIDEO                         = 3,
+
+    /**
+     * <p>In this mode, the AF algorithm modifies the lens
+     * position continually to attempt to provide a
+     * constantly-in-focus image stream.</p>
+     * <p>The focusing behavior should be suitable for still image
+     * capture; typically this means focusing as fast as
+     * possible. When the AF trigger is not involved, the AF
+     * algorithm should start in INACTIVE state, and then
+     * transition into PASSIVE_SCAN and PASSIVE_FOCUSED states as
+     * appropriate as it attempts to maintain focus. When the AF
+     * trigger is activated, the algorithm should finish its
+     * PASSIVE_SCAN if active, and then transition into
+     * AF_FOCUSED or AF_NOT_FOCUSED as appropriate, and lock the
+     * lens position until a cancel AF trigger is received.</p>
+     * <p>When the AF cancel trigger is activated, the algorithm
+     * should transition back to INACTIVE and then act as if it
+     * has just been started.</p>
+     */
+    ACAMERA_CONTROL_AF_MODE_CONTINUOUS_PICTURE                       = 4,
+
+    /**
+     * <p>Extended depth of field (digital focus) mode.</p>
+     * <p>The camera device will produce images with an extended
+     * depth of field automatically; no special focusing
+     * operations need to be done before taking a picture.</p>
+     * <p>AF triggers are ignored, and the AF state will always be
+     * INACTIVE.</p>
+     */
+    ACAMERA_CONTROL_AF_MODE_EDOF                                     = 5,
+
+} acamera_metadata_enum_android_control_af_mode_t;
+
+// ACAMERA_CONTROL_AF_TRIGGER
+typedef enum acamera_metadata_enum_acamera_control_af_trigger {
+    /**
+     * <p>The trigger is idle.</p>
+     */
+    ACAMERA_CONTROL_AF_TRIGGER_IDLE                                  = 0,
+
+    /**
+     * <p>Autofocus will trigger now.</p>
+     */
+    ACAMERA_CONTROL_AF_TRIGGER_START                                 = 1,
+
+    /**
+     * <p>Autofocus will return to its initial
+     * state, and cancel any currently active trigger.</p>
+     */
+    ACAMERA_CONTROL_AF_TRIGGER_CANCEL                                = 2,
+
+} acamera_metadata_enum_android_control_af_trigger_t;
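+
+/*
+ * Illustrative sketch (not part of the generated enums): a basic
+ * tap-to-focus sequence in AUTO focus mode, firing the trigger from a single
+ * request while the repeating preview request continues. Assumes
+ * ACAMERA_CONTROL_AF_MODE and ACAMERA_CONTROL_AF_TRIGGER are the
+ * corresponding request tags and that the session/request objects are valid.
+ *
+ *   uint8_t afMode  = ACAMERA_CONTROL_AF_MODE_AUTO;
+ *   uint8_t trigger = ACAMERA_CONTROL_AF_TRIGGER_START;
+ *   ACaptureRequest_setEntry_u8(request, ACAMERA_CONTROL_AF_MODE,
+ *           1, &afMode);
+ *   ACaptureRequest_setEntry_u8(request, ACAMERA_CONTROL_AF_TRIGGER,
+ *           1, &trigger);
+ *   ACameraCaptureSession_capture(session, &callbacks, 1, &request, NULL);
+ */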
+
+// ACAMERA_CONTROL_AWB_LOCK
+typedef enum acamera_metadata_enum_acamera_control_awb_lock {
+    /**
+     * <p>Auto-white balance lock is disabled; the AWB
+     * algorithm is free to update its parameters if in AUTO
+     * mode.</p>
+     */
+    ACAMERA_CONTROL_AWB_LOCK_OFF                                     = 0,
+
+    /**
+     * <p>Auto-white balance lock is enabled; the AWB
+     * algorithm will not update its parameters while the lock
+     * is active.</p>
+     */
+    ACAMERA_CONTROL_AWB_LOCK_ON                                      = 1,
+
+} acamera_metadata_enum_android_control_awb_lock_t;
+
+// ACAMERA_CONTROL_AWB_MODE
+typedef enum acamera_metadata_enum_acamera_control_awb_mode {
+    /**
+     * <p>The camera device's auto-white balance routine is disabled.</p>
+     * <p>The application-selected color transform matrix
+     * (ACAMERA_COLOR_CORRECTION_TRANSFORM) and gains
+     * (ACAMERA_COLOR_CORRECTION_GAINS) are used by the camera
+     * device for manual white balance control.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     */
+    ACAMERA_CONTROL_AWB_MODE_OFF                                     = 0,
+
+    /**
+     * <p>The camera device's auto-white balance routine is active.</p>
+     * <p>The application's values for ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * and ACAMERA_COLOR_CORRECTION_GAINS are ignored.
+     * For devices that support the MANUAL_POST_PROCESSING capability, the
+     * values used by the camera device for the transform and gains
+     * will be available in the capture result for this request.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     */
+    ACAMERA_CONTROL_AWB_MODE_AUTO                                    = 1,
+
+    /**
+     * <p>The camera device's auto-white balance routine is disabled;
+     * the camera device uses incandescent light as the assumed scene
+     * illumination for white balance.</p>
+     * <p>While the exact white balance transforms are up to the
+     * camera device, they will approximately match the CIE
+     * standard illuminant A.</p>
+     * <p>The application's values for ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * and ACAMERA_COLOR_CORRECTION_GAINS are ignored.
+     * For devices that support the MANUAL_POST_PROCESSING capability, the
+     * values used by the camera device for the transform and gains
+     * will be available in the capture result for this request.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     */
+    ACAMERA_CONTROL_AWB_MODE_INCANDESCENT                            = 2,
+
+    /**
+     * <p>The camera device's auto-white balance routine is disabled;
+     * the camera device uses fluorescent light as the assumed scene
+     * illumination for white balance.</p>
+     * <p>While the exact white balance transforms are up to the
+     * camera device, they will approximately match the CIE
+     * standard illuminant F2.</p>
+     * <p>The application's values for ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * and ACAMERA_COLOR_CORRECTION_GAINS are ignored.
+     * For devices that support the MANUAL_POST_PROCESSING capability, the
+     * values used by the camera device for the transform and gains
+     * will be available in the capture result for this request.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     */
+    ACAMERA_CONTROL_AWB_MODE_FLUORESCENT                             = 3,
+
+    /**
+     * <p>The camera device's auto-white balance routine is disabled;
+     * the camera device uses warm fluorescent light as the assumed scene
+     * illumination for white balance.</p>
+     * <p>While the exact white balance transforms are up to the
+     * camera device, they will approximately match the CIE
+     * standard illuminant F4.</p>
+     * <p>The application's values for ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * and ACAMERA_COLOR_CORRECTION_GAINS are ignored.
+     * For devices that support the MANUAL_POST_PROCESSING capability, the
+     * values used by the camera device for the transform and gains
+     * will be available in the capture result for this request.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     */
+    ACAMERA_CONTROL_AWB_MODE_WARM_FLUORESCENT                        = 4,
+
+    /**
+     * <p>The camera device's auto-white balance routine is disabled;
+     * the camera device uses daylight light as the assumed scene
+     * illumination for white balance.</p>
+     * <p>While the exact white balance transforms are up to the
+     * camera device, they will approximately match the CIE
+     * standard illuminant D65.</p>
+     * <p>The application's values for ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * and ACAMERA_COLOR_CORRECTION_GAINS are ignored.
+     * For devices that support the MANUAL_POST_PROCESSING capability, the
+     * values used by the camera device for the transform and gains
+     * will be available in the capture result for this request.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     */
+    ACAMERA_CONTROL_AWB_MODE_DAYLIGHT                                = 5,
+
+    /**
+     * <p>The camera device's auto-white balance routine is disabled;
+     * the camera device uses cloudy daylight light as the assumed scene
+     * illumination for white balance.</p>
+     * <p>The application's values for ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * and ACAMERA_COLOR_CORRECTION_GAINS are ignored.
+     * For devices that support the MANUAL_POST_PROCESSING capability, the
+     * values used by the camera device for the transform and gains
+     * will be available in the capture result for this request.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     */
+    ACAMERA_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT                         = 6,
+
+    /**
+     * <p>The camera device's auto-white balance routine is disabled;
+     * the camera device uses twilight light as the assumed scene
+     * illumination for white balance.</p>
+     * <p>The application's values for ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * and ACAMERA_COLOR_CORRECTION_GAINS are ignored.
+     * For devices that support the MANUAL_POST_PROCESSING capability, the
+     * values used by the camera device for the transform and gains
+     * will be available in the capture result for this request.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     */
+    ACAMERA_CONTROL_AWB_MODE_TWILIGHT                                = 7,
+
+    /**
+     * <p>The camera device's auto-white balance routine is disabled;
+     * the camera device uses shade light as the assumed scene
+     * illumination for white balance.</p>
+     * <p>The application's values for ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * and ACAMERA_COLOR_CORRECTION_GAINS are ignored.
+     * For devices that support the MANUAL_POST_PROCESSING capability, the
+     * values used by the camera device for the transform and gains
+     * will be available in the capture result for this request.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     */
+    ACAMERA_CONTROL_AWB_MODE_SHADE                                   = 8,
+
+} acamera_metadata_enum_android_control_awb_mode_t;
+
+// ACAMERA_CONTROL_CAPTURE_INTENT
+typedef enum acamera_metadata_enum_acamera_control_capture_intent {
+    /**
+     * <p>The goal of this request doesn't fall into the other
+     * categories. The camera device will default to preview-like
+     * behavior.</p>
+     */
+    ACAMERA_CONTROL_CAPTURE_INTENT_CUSTOM                            = 0,
+
+    /**
+     * <p>This request is for a preview-like use case.</p>
+     * <p>The precapture trigger may be used to start off a metering
+     * sequence with flash.</p>
+     */
+    ACAMERA_CONTROL_CAPTURE_INTENT_PREVIEW                           = 1,
+
+    /**
+     * <p>This request is for a still capture-type
+     * use case.</p>
+     * <p>If the flash unit is under automatic control, it may fire as needed.</p>
+     */
+    ACAMERA_CONTROL_CAPTURE_INTENT_STILL_CAPTURE                     = 2,
+
+    /**
+     * <p>This request is for a video recording
+     * use case.</p>
+     */
+    ACAMERA_CONTROL_CAPTURE_INTENT_VIDEO_RECORD                      = 3,
+
+    /**
+     * <p>This request is for a video snapshot (still
+     * image while recording video) use case.</p>
+     * <p>The camera device should take the highest-quality image
+     * possible (given the other settings) without disrupting the
+     * frame rate of video recording.  </p>
+     */
+    ACAMERA_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT                    = 4,
+
+    /**
+     * <p>This request is for a zero-shutter-lag (ZSL) use case; the
+     * application will stream full-resolution images and
+     * reprocess one or several of them later for a final
+     * capture.</p>
+     */
+    ACAMERA_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG                  = 5,
+
+    /**
+     * <p>This request is for a manual capture use case where
+     * the application wants to directly control the capture parameters.</p>
+     * <p>For example, the application may wish to manually control
+     * ACAMERA_SENSOR_EXPOSURE_TIME, ACAMERA_SENSOR_SENSITIVITY, etc.</p>
+     *
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_CONTROL_CAPTURE_INTENT_MANUAL                            = 6,
+
+} acamera_metadata_enum_android_control_capture_intent_t;
+
+// ACAMERA_CONTROL_EFFECT_MODE
+typedef enum acamera_metadata_enum_acamera_control_effect_mode {
+    /**
+     * <p>No color effect will be applied.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE_OFF                                  = 0,
+
+    /**
+     * <p>A "monocolor" effect where the image is mapped into
+     * a single color.</p>
+     * <p>This will typically be grayscale.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE_MONO                                 = 1,
+
+    /**
+     * <p>A "photo-negative" effect where the image's colors
+     * are inverted.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE_NEGATIVE                             = 2,
+
+    /**
+     * <p>A "solarisation" effect (Sabattier effect) where the
+     * image is wholly or partially reversed in
+     * tone.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE_SOLARIZE                             = 3,
+
+    /**
+     * <p>A "sepia" effect where the image is mapped into warm
+     * gray, red, and brown tones.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE_SEPIA                                = 4,
+
+    /**
+     * <p>A "posterization" effect where the image uses
+     * discrete regions of tone rather than a continuous
+     * gradient of tones.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE_POSTERIZE                            = 5,
+
+    /**
+     * <p>A "whiteboard" effect where the image is typically displayed
+     * as regions of white, with black or grey details.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE_WHITEBOARD                           = 6,
+
+    /**
+     * <p>A "blackboard" effect where the image is typically displayed
+     * as regions of black, with white or grey details.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE_BLACKBOARD                           = 7,
+
+    /**
+     * <p>An "aqua" effect where a blue hue is added to the image.</p>
+     */
+    ACAMERA_CONTROL_EFFECT_MODE_AQUA                                 = 8,
+
+} acamera_metadata_enum_android_control_effect_mode_t;
+
+// ACAMERA_CONTROL_MODE
+typedef enum acamera_metadata_enum_acamera_control_mode {
+    /**
+     * <p>Full application control of pipeline.</p>
+     * <p>All control by the device's metering and focusing (3A)
+     * routines is disabled, and no other settings in
+     * ACAMERA_CONTROL_* have any effect, except that
+     * ACAMERA_CONTROL_CAPTURE_INTENT may be used by the camera
+     * device to select post-processing values for processing
+     * blocks that do not allow for manual control, or are not
+     * exposed by the camera API.</p>
+     * <p>However, the camera device's 3A routines may continue to
+     * collect statistics and update their internal state so that
+     * when control is switched to AUTO mode, good control values
+     * can be immediately applied.</p>
+     *
+     * @see ACAMERA_CONTROL_CAPTURE_INTENT
+     */
+    ACAMERA_CONTROL_MODE_OFF                                         = 0,
+
+    /**
+     * <p>Use settings for each individual 3A routine.</p>
+     * <p>Manual control of capture parameters is disabled. All
+     * controls in ACAMERA_CONTROL_* besides sceneMode take
+     * effect.</p>
+     */
+    ACAMERA_CONTROL_MODE_AUTO                                        = 1,
+
+    /**
+     * <p>Use a specific scene mode.</p>
+     * <p>Enabling this disables control.aeMode, control.awbMode and
+     * control.afMode controls; the camera device will ignore
+     * those settings while USE_SCENE_MODE is active (except for
+     * FACE_PRIORITY scene mode). Other control entries are still active.
+     * This setting can only be used if scene mode is supported (i.e.
+     * ACAMERA_CONTROL_AVAILABLE_SCENE_MODES
+     * contains some modes other than DISABLED).</p>
+     *
+     * @see ACAMERA_CONTROL_AVAILABLE_SCENE_MODES
+     */
+    ACAMERA_CONTROL_MODE_USE_SCENE_MODE                              = 2,
+
+    /**
+     * <p>Same as OFF mode, except that this capture will not be
+     * used by camera device background auto-exposure, auto-white balance and
+     * auto-focus algorithms (3A) to update their statistics.</p>
+     * <p>Specifically, the 3A routines are locked to the last
+     * values set from a request with AUTO, OFF, or
+     * USE_SCENE_MODE, and any statistics or state updates
+     * collected from manual captures with OFF_KEEP_STATE will be
+     * discarded by the camera device.</p>
+     */
+    ACAMERA_CONTROL_MODE_OFF_KEEP_STATE                              = 3,
+
+} acamera_metadata_enum_android_control_mode_t;
+
+// ACAMERA_CONTROL_SCENE_MODE
+typedef enum acamera_metadata_enum_acamera_control_scene_mode {
+    /**
+     * <p>Indicates that no scene modes are set for a given capture request.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_DISABLED                              = 0,
+
+    /**
+     * <p>If face detection support exists, use face
+     * detection data for auto-focus, auto-white balance, and
+     * auto-exposure routines.</p>
+     * <p>If face detection statistics are disabled
+     * (i.e. ACAMERA_STATISTICS_FACE_DETECT_MODE is set to OFF),
+     * this should still operate correctly (but will not return
+     * face detection statistics to the framework).</p>
+     * <p>Unlike the other scene modes, ACAMERA_CONTROL_AE_MODE,
+     * ACAMERA_CONTROL_AWB_MODE, and ACAMERA_CONTROL_AF_MODE
+     * remain active when FACE_PRIORITY is set.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AF_MODE
+     * @see ACAMERA_CONTROL_AWB_MODE
+     * @see ACAMERA_STATISTICS_FACE_DETECT_MODE
+     */
+    ACAMERA_CONTROL_SCENE_MODE_FACE_PRIORITY                         = 1,
+
+    /**
+     * <p>Optimized for photos of quickly moving objects.</p>
+     * <p>Similar to SPORTS.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_ACTION                                = 2,
+
+    /**
+     * <p>Optimized for still photos of people.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_PORTRAIT                              = 3,
+
+    /**
+     * <p>Optimized for photos of distant macroscopic objects.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_LANDSCAPE                             = 4,
+
+    /**
+     * <p>Optimized for low-light settings.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_NIGHT                                 = 5,
+
+    /**
+     * <p>Optimized for still photos of people in low-light
+     * settings.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_NIGHT_PORTRAIT                        = 6,
+
+    /**
+     * <p>Optimized for dim, indoor settings where flash must
+     * remain off.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_THEATRE                               = 7,
+
+    /**
+     * <p>Optimized for bright, outdoor beach settings.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_BEACH                                 = 8,
+
+    /**
+     * <p>Optimized for bright, outdoor settings containing snow.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_SNOW                                  = 9,
+
+    /**
+     * <p>Optimized for scenes of the setting sun.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_SUNSET                                = 10,
+
+    /**
+     * <p>Optimized to avoid blurry photos due to small amounts of
+     * device motion (for example, due to hand shake).</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_STEADYPHOTO                           = 11,
+
+    /**
+     * <p>Optimized for nighttime photos of fireworks.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_FIREWORKS                             = 12,
+
+    /**
+     * <p>Optimized for photos of quickly moving people.</p>
+     * <p>Similar to ACTION.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_SPORTS                                = 13,
+
+    /**
+     * <p>Optimized for dim, indoor settings with multiple moving
+     * people.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_PARTY                                 = 14,
+
+    /**
+     * <p>Optimized for dim settings where the main light source
+     * is a flame.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_CANDLELIGHT                           = 15,
+
+    /**
+     * <p>Optimized for accurately capturing a photo of a barcode
+     * for use by camera applications that wish to read the
+     * barcode value.</p>
+     */
+    ACAMERA_CONTROL_SCENE_MODE_BARCODE                               = 16,
+
+    /**
+     * <p>Turn on a device-specific high dynamic range (HDR) mode.</p>
+     * <p>In this scene mode, the camera device captures images
+     * that keep a larger range of scene illumination levels
+     * visible in the final image. For example, when taking a
+     * picture of an object in front of a bright window, both
+     * the object and the scene through the window may be
+     * visible when using HDR mode, while in normal AUTO mode,
+     * one or the other may be poorly exposed. As a tradeoff,
+     * HDR mode generally takes much longer to capture a single
+     * image, has no user control, and may have other artifacts
+     * depending on the HDR method used.</p>
+     * <p>Therefore, HDR captures operate at a much slower rate
+     * than regular captures.</p>
+     * <p>In this mode, on LIMITED or FULL devices, when a request
+     * is made with a ACAMERA_CONTROL_CAPTURE_INTENT of
+     * STILL_CAPTURE, the camera device will capture an image
+     * using a high dynamic range capture technique.  On LEGACY
+     * devices, captures that target a JPEG-format output will
+     * be captured with HDR, and the capture intent is not
+     * relevant.</p>
+     * <p>The HDR capture may involve the device capturing a burst
+     * of images internally and combining them into one, or it
+     * may involve the device using specialized high dynamic
+     * range capture hardware. In all cases, a single image is
+     * produced in response to a capture request submitted
+     * while in HDR mode.</p>
+     * <p>Since substantial post-processing is generally needed to
+     * produce an HDR image, only YUV, PRIVATE, and JPEG
+     * outputs are supported for LIMITED/FULL device HDR
+     * captures, and only JPEG outputs are supported for LEGACY
+     * HDR captures. Using a RAW output for HDR capture is not
+     * supported.</p>
+     * <p>Some devices may also support always-on HDR, which
+     * applies HDR processing at full frame rate.  For these
+     * devices, intents other than STILL_CAPTURE will also
+     * produce an HDR output with no frame rate impact compared
+     * to normal operation, though the quality may be lower
+     * than for STILL_CAPTURE intents.</p>
+     * <p>If SCENE_MODE_HDR is used with unsupported output types
+     * or capture intents, the images captured will be as if
+     * the SCENE_MODE was not enabled at all.</p>
+     *
+     * @see ACAMERA_CONTROL_CAPTURE_INTENT
+     */
+    ACAMERA_CONTROL_SCENE_MODE_HDR                                   = 18,
+
+} acamera_metadata_enum_android_control_scene_mode_t;
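+
+// Illustrative sketch (assumes ACameraMetadata_getConstEntry() and
+// ACaptureRequest_setEntry_u8() from the companion NDK headers, plus a
+// characteristics ACameraMetadata* named "characteristics"): only enable
+// the HDR scene mode when the static metadata advertises it.
+//
+//     ACameraMetadata_const_entry scenes;
+//     if (ACameraMetadata_getConstEntry(characteristics,
+//             ACAMERA_CONTROL_AVAILABLE_SCENE_MODES, &scenes) == ACAMERA_OK) {
+//         for (uint32_t i = 0; i < scenes.count; i++) {
+//             if (scenes.data.u8[i] != ACAMERA_CONTROL_SCENE_MODE_HDR) continue;
+//             uint8_t mode = ACAMERA_CONTROL_MODE_USE_SCENE_MODE;
+//             uint8_t scene = ACAMERA_CONTROL_SCENE_MODE_HDR;
+//             ACaptureRequest_setEntry_u8(request, ACAMERA_CONTROL_MODE,
+//                     1, &mode);
+//             ACaptureRequest_setEntry_u8(request, ACAMERA_CONTROL_SCENE_MODE,
+//                     1, &scene);
+//         }
+//     }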
+
+// ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
+typedef enum acamera_metadata_enum_acamera_control_video_stabilization_mode {
+    /**
+     * <p>Video stabilization is disabled.</p>
+     */
+    ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE_OFF                     = 0,
+
+    /**
+     * <p>Video stabilization is enabled.</p>
+     */
+    ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE_ON                      = 1,
+
+} acamera_metadata_enum_android_control_video_stabilization_mode_t;
+
+// ACAMERA_CONTROL_AE_STATE
+typedef enum acamera_metadata_enum_acamera_control_ae_state {
+    /**
+     * <p>AE is off or recently reset.</p>
+     * <p>When a camera device is opened, it starts in
+     * this state. This is a transient state; the camera device may skip
+     * reporting this state in the capture result.</p>
+     */
+    ACAMERA_CONTROL_AE_STATE_INACTIVE                                = 0,
+
+    /**
+     * <p>AE doesn't yet have a good set of control values
+     * for the current scene.</p>
+     * <p>This is a transient state; the camera device may skip
+     * reporting this state in the capture result.</p>
+     */
+    ACAMERA_CONTROL_AE_STATE_SEARCHING                               = 1,
+
+    /**
+     * <p>AE has a good set of control values for the
+     * current scene.</p>
+     */
+    ACAMERA_CONTROL_AE_STATE_CONVERGED                               = 2,
+
+    /**
+     * <p>AE has been locked.</p>
+     */
+    ACAMERA_CONTROL_AE_STATE_LOCKED                                  = 3,
+
+    /**
+     * <p>AE has a good set of control values, but flash
+     * needs to be fired for good quality still
+     * capture.</p>
+     */
+    ACAMERA_CONTROL_AE_STATE_FLASH_REQUIRED                          = 4,
+
+    /**
+     * <p>AE has been asked to do a precapture sequence
+     * and is currently executing it.</p>
+     * <p>Precapture can be triggered by setting
+     * ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER to START. A currently
+     * active precapture metering sequence, or one that has completed
+     * (if it caused an internal AE lock in the camera device), can be
+     * canceled by setting ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER to
+     * CANCEL.</p>
+     * <p>Once PRECAPTURE completes, AE will transition to CONVERGED
+     * or FLASH_REQUIRED as appropriate. This is a transient state;
+     * the camera device may skip reporting this state in the
+     * capture result.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     */
+    ACAMERA_CONTROL_AE_STATE_PRECAPTURE                              = 5,
+
+} acamera_metadata_enum_android_control_ae_state_t;
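+
+// Illustrative sketch (assumes ACameraMetadata_getConstEntry() applied to a
+// capture result's ACameraMetadata, named "result" here): deciding whether a
+// still capture can proceed without a precapture sequence.
+//
+//     ACameraMetadata_const_entry aeState;
+//     if (ACameraMetadata_getConstEntry(result, ACAMERA_CONTROL_AE_STATE,
+//             &aeState) == ACAMERA_OK) {
+//         uint8_t state = aeState.data.u8[0];
+//         if (state == ACAMERA_CONTROL_AE_STATE_CONVERGED ||
+//                 state == ACAMERA_CONTROL_AE_STATE_LOCKED) {
+//             // Exposure is settled; no precapture trigger needed.
+//         }
+//     }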
+
+// ACAMERA_CONTROL_AF_STATE
+typedef enum acamera_metadata_enum_acamera_control_af_state {
+    /**
+     * <p>AF is off or has not yet tried to scan/been asked
+     * to scan.</p>
+     * <p>When a camera device is opened, it starts in this
+     * state. This is a transient state; the camera device may
+     * skip reporting this state in the capture
+     * result.</p>
+     */
+    ACAMERA_CONTROL_AF_STATE_INACTIVE                                = 0,
+
+    /**
+     * <p>AF is currently performing an AF scan initiated by the
+     * camera device in a continuous autofocus mode.</p>
+     * <p>Only used by CONTINUOUS_* AF modes. This is a transient
+     * state; the camera device may skip reporting this state in
+     * the capture result.</p>
+     */
+    ACAMERA_CONTROL_AF_STATE_PASSIVE_SCAN                            = 1,
+
+    /**
+     * <p>AF currently believes it is in focus, but may
+     * restart scanning at any time.</p>
+     * <p>Only used by CONTINUOUS_* AF modes. This is a transient
+     * state; the camera device may skip reporting this state in
+     * the capture result.</p>
+     */
+    ACAMERA_CONTROL_AF_STATE_PASSIVE_FOCUSED                         = 2,
+
+    /**
+     * <p>AF is performing an AF scan because it was
+     * triggered by AF trigger.</p>
+     * <p>Only used by AUTO or MACRO AF modes. This is a transient
+     * state; the camera device may skip reporting this state in
+     * the capture result.</p>
+     */
+    ACAMERA_CONTROL_AF_STATE_ACTIVE_SCAN                             = 3,
+
+    /**
+     * <p>AF believes it is focused correctly and has locked
+     * focus.</p>
+     * <p>This state is reached only after an explicit START AF trigger has been
+     * sent (ACAMERA_CONTROL_AF_TRIGGER), when good focus has been obtained.</p>
+     * <p>The lens will remain stationary until the AF mode (ACAMERA_CONTROL_AF_MODE) is changed or
+     * a new AF trigger is sent to the camera device (ACAMERA_CONTROL_AF_TRIGGER).</p>
+     *
+     * @see ACAMERA_CONTROL_AF_MODE
+     * @see ACAMERA_CONTROL_AF_TRIGGER
+     */
+    ACAMERA_CONTROL_AF_STATE_FOCUSED_LOCKED                          = 4,
+
+    /**
+     * <p>AF has failed to focus successfully and has locked
+     * focus.</p>
+     * <p>This state is reached only after an explicit START AF trigger has been
+     * sent (ACAMERA_CONTROL_AF_TRIGGER), when good focus cannot be obtained.</p>
+     * <p>The lens will remain stationary until the AF mode (ACAMERA_CONTROL_AF_MODE) is changed or
+     * a new AF trigger is sent to the camera device (ACAMERA_CONTROL_AF_TRIGGER).</p>
+     *
+     * @see ACAMERA_CONTROL_AF_MODE
+     * @see ACAMERA_CONTROL_AF_TRIGGER
+     */
+    ACAMERA_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED                      = 5,
+
+    /**
+     * <p>AF finished a passive scan without finding focus,
+     * and may restart scanning at any time.</p>
+     * <p>Only used by CONTINUOUS_* AF modes. This is a transient state; the camera
+     * device may skip reporting this state in the capture result.</p>
+     * <p>LEGACY camera devices do not support this state. When a passive
+     * scan has finished, it will always go to PASSIVE_FOCUSED.</p>
+     */
+    ACAMERA_CONTROL_AF_STATE_PASSIVE_UNFOCUSED                       = 6,
+
+} acamera_metadata_enum_android_control_af_state_t;
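+
+// Illustrative sketch: after sending an AF trigger (START), wait for one of
+// the two locked states before capturing (assumes a result ACameraMetadata*
+// named "result" and ACameraMetadata_getConstEntry()).
+//
+//     ACameraMetadata_const_entry afState;
+//     if (ACameraMetadata_getConstEntry(result, ACAMERA_CONTROL_AF_STATE,
+//             &afState) == ACAMERA_OK) {
+//         switch (afState.data.u8[0]) {
+//             case ACAMERA_CONTROL_AF_STATE_FOCUSED_LOCKED:
+//             case ACAMERA_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+//                 // Scan is done; capture now (focus may or may not be good).
+//                 break;
+//             default:
+//                 break;  // Still scanning or inactive; keep waiting.
+//         }
+//     }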
+
+// ACAMERA_CONTROL_AWB_STATE
+typedef enum acamera_metadata_enum_acamera_control_awb_state {
+    /**
+     * <p>AWB is not in auto mode, or has not yet started metering.</p>
+     * <p>When a camera device is opened, it starts in this
+     * state. This is a transient state; the camera device may
+     * skip reporting this state in the capture
+     * result.</p>
+     */
+    ACAMERA_CONTROL_AWB_STATE_INACTIVE                               = 0,
+
+    /**
+     * <p>AWB doesn't yet have a good set of control
+     * values for the current scene.</p>
+     * <p>This is a transient state; the camera device
+     * may skip reporting this state in the capture result.</p>
+     */
+    ACAMERA_CONTROL_AWB_STATE_SEARCHING                              = 1,
+
+    /**
+     * <p>AWB has a good set of control values for the
+     * current scene.</p>
+     */
+    ACAMERA_CONTROL_AWB_STATE_CONVERGED                              = 2,
+
+    /**
+     * <p>AWB has been locked.</p>
+     */
+    ACAMERA_CONTROL_AWB_STATE_LOCKED                                 = 3,
+
+} acamera_metadata_enum_android_control_awb_state_t;
+
+// ACAMERA_CONTROL_AE_LOCK_AVAILABLE
+typedef enum acamera_metadata_enum_acamera_control_ae_lock_available {
+    ACAMERA_CONTROL_AE_LOCK_AVAILABLE_FALSE                          = 0,
+
+    ACAMERA_CONTROL_AE_LOCK_AVAILABLE_TRUE                           = 1,
+
+} acamera_metadata_enum_android_control_ae_lock_available_t;
+
+// ACAMERA_CONTROL_AWB_LOCK_AVAILABLE
+typedef enum acamera_metadata_enum_acamera_control_awb_lock_available {
+    ACAMERA_CONTROL_AWB_LOCK_AVAILABLE_FALSE                         = 0,
+
+    ACAMERA_CONTROL_AWB_LOCK_AVAILABLE_TRUE                          = 1,
+
+} acamera_metadata_enum_android_control_awb_lock_available_t;
+
+
+
+// ACAMERA_EDGE_MODE
+typedef enum acamera_metadata_enum_acamera_edge_mode {
+    /**
+     * <p>No edge enhancement is applied.</p>
+     */
+    ACAMERA_EDGE_MODE_OFF                                            = 0,
+
+    /**
+     * <p>Apply edge enhancement at a quality level that does not slow down the frame
+     * rate relative to sensor output. It may be the same as OFF if edge enhancement
+     * would slow down the frame rate relative to sensor output.</p>
+     */
+    ACAMERA_EDGE_MODE_FAST                                           = 1,
+
+    /**
+     * <p>Apply high-quality edge enhancement, at a cost of possibly reduced output frame rate.</p>
+     */
+    ACAMERA_EDGE_MODE_HIGH_QUALITY                                   = 2,
+
+    /**
+     * <p>Edge enhancement is applied at different levels for different output streams,
+     * based on resolution. Streams at maximum recording resolution (see {@link
+     * ACameraDevice_createCaptureSession}) or below have
+     * edge enhancement applied, while higher-resolution streams have no edge enhancement
+     * applied. The level of edge enhancement for low-resolution streams is tuned so that
+     * frame rate is not impacted, and the quality is equal to or better than FAST (since it
+     * is only applied to lower-resolution outputs, quality may improve from FAST).</p>
+     * <p>This mode is intended to be used by applications operating in a zero-shutter-lag mode
+     * with YUV or PRIVATE reprocessing, where the application continuously captures
+     * high-resolution intermediate buffers into a circular buffer, from which a final image is
+     * produced via reprocessing when a user takes a picture.  For such a use case, the
+     * high-resolution buffers must not have edge enhancement applied to maximize efficiency of
+     * preview and to avoid double-applying enhancement when reprocessed, while low-resolution
+     * buffers (used for recording or preview, generally) need edge enhancement applied for
+     * reasonable preview quality.</p>
+     * <p>This mode is guaranteed to be supported by devices that support either the
+     * YUV_REPROCESSING or PRIVATE_REPROCESSING capabilities
+     * (ACAMERA_REQUEST_AVAILABLE_CAPABILITIES lists either of those capabilities) and it will
+     * be the default mode for CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG template.</p>
+     *
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     */
+    ACAMERA_EDGE_MODE_ZERO_SHUTTER_LAG                               = 3,
+
+} acamera_metadata_enum_android_edge_mode_t;
+
+
+// ACAMERA_FLASH_MODE
+typedef enum acamera_metadata_enum_acamera_flash_mode {
+    /**
+     * <p>Do not fire the flash for this capture.</p>
+     */
+    ACAMERA_FLASH_MODE_OFF                                           = 0,
+
+    /**
+     * <p>If the flash is available and charged, fire flash
+     * for this capture.</p>
+     */
+    ACAMERA_FLASH_MODE_SINGLE                                        = 1,
+
+    /**
+     * <p>Transition flash to continuously on.</p>
+     */
+    ACAMERA_FLASH_MODE_TORCH                                         = 2,
+
+} acamera_metadata_enum_android_flash_mode_t;
+
+// ACAMERA_FLASH_STATE
+typedef enum acamera_metadata_enum_acamera_flash_state {
+    /**
+     * <p>No flash on camera.</p>
+     */
+    ACAMERA_FLASH_STATE_UNAVAILABLE                                  = 0,
+
+    /**
+     * <p>Flash is charging and cannot be fired.</p>
+     */
+    ACAMERA_FLASH_STATE_CHARGING                                     = 1,
+
+    /**
+     * <p>Flash is ready to fire.</p>
+     */
+    ACAMERA_FLASH_STATE_READY                                        = 2,
+
+    /**
+     * <p>Flash fired for this capture.</p>
+     */
+    ACAMERA_FLASH_STATE_FIRED                                        = 3,
+
+    /**
+     * <p>Flash partially illuminated this frame.</p>
+     * <p>This is usually due to the next or previous frame having
+     * the flash fire, and the flash spilling into this capture
+     * due to hardware limitations.</p>
+     */
+    ACAMERA_FLASH_STATE_PARTIAL                                      = 4,
+
+} acamera_metadata_enum_android_flash_state_t;
+
+
+// ACAMERA_FLASH_INFO_AVAILABLE
+typedef enum acamera_metadata_enum_acamera_flash_info_available {
+    ACAMERA_FLASH_INFO_AVAILABLE_FALSE                               = 0,
+
+    ACAMERA_FLASH_INFO_AVAILABLE_TRUE                                = 1,
+
+} acamera_metadata_enum_android_flash_info_available_t;
+
+
+// ACAMERA_HOT_PIXEL_MODE
+typedef enum acamera_metadata_enum_acamera_hot_pixel_mode {
+    /**
+     * <p>No hot pixel correction is applied.</p>
+     * <p>The frame rate must not be reduced relative to sensor raw output
+     * for this option.</p>
+     * <p>The hot pixel map may be returned in ACAMERA_STATISTICS_HOT_PIXEL_MAP.</p>
+     *
+     * @see ACAMERA_STATISTICS_HOT_PIXEL_MAP
+     */
+    ACAMERA_HOT_PIXEL_MODE_OFF                                       = 0,
+
+    /**
+     * <p>Hot pixel correction is applied, without reducing frame
+     * rate relative to sensor raw output.</p>
+     * <p>The hot pixel map may be returned in ACAMERA_STATISTICS_HOT_PIXEL_MAP.</p>
+     *
+     * @see ACAMERA_STATISTICS_HOT_PIXEL_MAP
+     */
+    ACAMERA_HOT_PIXEL_MODE_FAST                                      = 1,
+
+    /**
+     * <p>High-quality hot pixel correction is applied, at a cost
+     * of possibly reduced frame rate relative to sensor raw output.</p>
+     * <p>The hot pixel map may be returned in ACAMERA_STATISTICS_HOT_PIXEL_MAP.</p>
+     *
+     * @see ACAMERA_STATISTICS_HOT_PIXEL_MAP
+     */
+    ACAMERA_HOT_PIXEL_MODE_HIGH_QUALITY                              = 2,
+
+} acamera_metadata_enum_android_hot_pixel_mode_t;
+
+
+
+// ACAMERA_LENS_OPTICAL_STABILIZATION_MODE
+typedef enum acamera_metadata_enum_acamera_lens_optical_stabilization_mode {
+    /**
+     * <p>Optical stabilization is unavailable.</p>
+     */
+    ACAMERA_LENS_OPTICAL_STABILIZATION_MODE_OFF                      = 0,
+
+    /**
+     * <p>Optical stabilization is enabled.</p>
+     */
+    ACAMERA_LENS_OPTICAL_STABILIZATION_MODE_ON                       = 1,
+
+} acamera_metadata_enum_android_lens_optical_stabilization_mode_t;
+
+// ACAMERA_LENS_FACING
+typedef enum acamera_metadata_enum_acamera_lens_facing {
+    /**
+     * <p>The camera device faces the same direction as the device's screen.</p>
+     */
+    ACAMERA_LENS_FACING_FRONT                                        = 0,
+
+    /**
+     * <p>The camera device faces the opposite direction as the device's screen.</p>
+     */
+    ACAMERA_LENS_FACING_BACK                                         = 1,
+
+    /**
+     * <p>The camera device is an external camera, and has no fixed facing relative to the
+     * device's screen.</p>
+     */
+    ACAMERA_LENS_FACING_EXTERNAL                                     = 2,
+
+} acamera_metadata_enum_android_lens_facing_t;
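+
+// Illustrative sketch (assumes the companion ACameraManager API from
+// <camera/NdkCameraManager.h>): finding the first back-facing camera.
+//
+//     ACameraManager* mgr = ACameraManager_create();
+//     ACameraIdList* ids = NULL;
+//     if (ACameraManager_getCameraIdList(mgr, &ids) == ACAMERA_OK) {
+//         for (int i = 0; i < ids->numCameras; i++) {
+//             ACameraMetadata* chars = NULL;
+//             ACameraManager_getCameraCharacteristics(mgr, ids->cameraIds[i],
+//                     &chars);
+//             ACameraMetadata_const_entry facing;
+//             if (ACameraMetadata_getConstEntry(chars, ACAMERA_LENS_FACING,
+//                     &facing) == ACAMERA_OK &&
+//                     facing.data.u8[0] == ACAMERA_LENS_FACING_BACK) {
+//                 // ids->cameraIds[i] is a back-facing camera.
+//             }
+//             ACameraMetadata_free(chars);
+//         }
+//         ACameraManager_deleteCameraIdList(ids);
+//     }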
+
+// ACAMERA_LENS_STATE
+typedef enum acamera_metadata_enum_acamera_lens_state {
+    /**
+     * <p>The lens parameters (ACAMERA_LENS_FOCAL_LENGTH, ACAMERA_LENS_FOCUS_DISTANCE,
+     * ACAMERA_LENS_FILTER_DENSITY and ACAMERA_LENS_APERTURE) are not changing.</p>
+     *
+     * @see ACAMERA_LENS_APERTURE
+     * @see ACAMERA_LENS_FILTER_DENSITY
+     * @see ACAMERA_LENS_FOCAL_LENGTH
+     * @see ACAMERA_LENS_FOCUS_DISTANCE
+     */
+    ACAMERA_LENS_STATE_STATIONARY                                    = 0,
+
+    /**
+     * <p>One or more of the lens parameters
+     * (ACAMERA_LENS_FOCAL_LENGTH, ACAMERA_LENS_FOCUS_DISTANCE,
+     * ACAMERA_LENS_FILTER_DENSITY or ACAMERA_LENS_APERTURE) is
+     * currently changing.</p>
+     *
+     * @see ACAMERA_LENS_APERTURE
+     * @see ACAMERA_LENS_FILTER_DENSITY
+     * @see ACAMERA_LENS_FOCAL_LENGTH
+     * @see ACAMERA_LENS_FOCUS_DISTANCE
+     */
+    ACAMERA_LENS_STATE_MOVING                                        = 1,
+
+} acamera_metadata_enum_android_lens_state_t;
+
+
+// ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
+typedef enum acamera_metadata_enum_acamera_lens_info_focus_distance_calibration {
+    /**
+     * <p>The lens focus distance is not accurate, and the units used for
+     * ACAMERA_LENS_FOCUS_DISTANCE do not correspond to any physical units.</p>
+     * <p>Setting the lens to the same focus distance on separate occasions may
+     * result in a different real focus distance, depending on factors such
+     * as the orientation of the device, the age of the focusing mechanism,
+     * and the device temperature. The focus distance value will still be
+     * in the range of <code>[0, ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE]</code>, where 0
+     * represents the farthest focus.</p>
+     *
+     * @see ACAMERA_LENS_FOCUS_DISTANCE
+     * @see ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE
+     */
+    ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED        = 0,
+
+    /**
+     * <p>The lens focus distance is measured in diopters.</p>
+     * <p>However, setting the lens to the same focus distance
+     * on separate occasions may result in a different real
+     * focus distance, depending on factors such as the
+     * orientation of the device, the age of the focusing
+     * mechanism, and the device temperature.</p>
+     */
+    ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE         = 1,
+
+    /**
+     * <p>The lens focus distance is measured in diopters, and
+     * is calibrated.</p>
+     * <p>The lens mechanism is calibrated so that setting the
+     * same focus distance is repeatable on multiple
+     * occasions with good accuracy, and the focus distance
+     * corresponds to the real physical distance to the plane
+     * of best focus.</p>
+     */
+    ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_CALIBRATED          = 2,
+
+} acamera_metadata_enum_android_lens_info_focus_distance_calibration_t;
+
+
+// ACAMERA_NOISE_REDUCTION_MODE
+typedef enum acamera_metadata_enum_acamera_noise_reduction_mode {
+    /**
+     * <p>No noise reduction is applied.</p>
+     */
+    ACAMERA_NOISE_REDUCTION_MODE_OFF                                 = 0,
+
+    /**
+     * <p>Noise reduction is applied without reducing the frame rate relative to sensor
+     * output. It may be the same as OFF if noise reduction would reduce the frame rate
+     * relative to sensor output.</p>
+     */
+    ACAMERA_NOISE_REDUCTION_MODE_FAST                                = 1,
+
+    /**
+     * <p>High-quality noise reduction is applied, at the cost of possibly reduced frame
+     * rate relative to sensor output.</p>
+     */
+    ACAMERA_NOISE_REDUCTION_MODE_HIGH_QUALITY                        = 2,
+
+    /**
+     * <p>MINIMAL noise reduction is applied without reducing frame rate relative to
+     * sensor output.</p>
+     */
+    ACAMERA_NOISE_REDUCTION_MODE_MINIMAL                             = 3,
+
+    /**
+     * <p>Noise reduction is applied at different levels for different output streams,
+     * based on resolution. Streams at maximum recording resolution (see {@link
+     * ACameraDevice_createCaptureSession}) or below have noise
+     * reduction applied, while higher-resolution streams have MINIMAL (if supported) or no
+     * noise reduction applied (if MINIMAL is not supported). The degree of noise reduction
+     * for low-resolution streams is tuned so that frame rate is not impacted, and the quality
+     * is equal to or better than FAST (since it is only applied to lower-resolution outputs,
+     * quality may improve from FAST).</p>
+     * <p>This mode is intended to be used by applications operating in a zero-shutter-lag mode
+     * with YUV or PRIVATE reprocessing, where the application continuously captures
+     * high-resolution intermediate buffers into a circular buffer, from which a final image is
+     * produced via reprocessing when a user takes a picture.  For such a use case, the
+     * high-resolution buffers must not have noise reduction applied to maximize efficiency of
+     * preview and to avoid over-applying noise filtering when reprocessing, while
+     * low-resolution buffers (used for recording or preview, generally) need noise reduction
+     * applied for reasonable preview quality.</p>
+     * <p>This mode is guaranteed to be supported by devices that support either the
+     * YUV_REPROCESSING or PRIVATE_REPROCESSING capabilities
+     * (ACAMERA_REQUEST_AVAILABLE_CAPABILITIES lists either of those capabilities) and it will
+     * be the default mode for CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG template.</p>
+     *
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     */
+    ACAMERA_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG                    = 4,
+
+} acamera_metadata_enum_android_noise_reduction_mode_t;
+
+
+
+// ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+typedef enum acamera_metadata_enum_acamera_request_available_capabilities {
+    /**
+     * <p>The minimal set of capabilities that every camera
+     * device (regardless of ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL)
+     * supports.</p>
+     * <p>This capability is listed by all normal devices, and
+     * indicates that the camera device has a feature set
+     * that's comparable to the baseline requirements for the
+     * older android.hardware.Camera API.</p>
+     * <p>Devices with the DEPTH_OUTPUT capability might not list this
+     * capability, indicating that they support only depth measurement,
+     * not standard color output.</p>
+     *
+     * @see ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE       = 0,
+
+    /**
+     * <p>The camera device can be manually controlled (3A algorithms such
+     * as auto-exposure and auto-focus can be bypassed).
+     * The camera device supports basic manual control of the sensor image
+     * acquisition related stages. This means the following controls are
+     * guaranteed to be supported:</p>
+     * <ul>
+     * <li>Manual frame duration control<ul>
+     * <li>ACAMERA_SENSOR_FRAME_DURATION</li>
+     * <li>ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION</li>
+     * </ul>
+     * </li>
+     * <li>Manual exposure control<ul>
+     * <li>ACAMERA_SENSOR_EXPOSURE_TIME</li>
+     * <li>ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE</li>
+     * </ul>
+     * </li>
+     * <li>Manual sensitivity control<ul>
+     * <li>ACAMERA_SENSOR_SENSITIVITY</li>
+     * <li>ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE</li>
+     * </ul>
+     * </li>
+     * <li>Manual lens control (if the lens is adjustable)<ul>
+     * <li>ACAMERA_LENS_*</li>
+     * </ul>
+     * </li>
+     * <li>Manual flash control (if a flash unit is present)<ul>
+     * <li>ACAMERA_FLASH_*</li>
+     * </ul>
+     * </li>
+     * <li>Manual black level locking<ul>
+     * <li>ACAMERA_BLACK_LEVEL_LOCK</li>
+     * </ul>
+     * </li>
+     * <li>Auto exposure lock<ul>
+     * <li>ACAMERA_CONTROL_AE_LOCK</li>
+     * </ul>
+     * </li>
+     * </ul>
+     * <p>If any of the above 3A algorithms are enabled, then the camera
+     * device will accurately report the values applied by 3A in the
+     * result.</p>
+     * <p>A given camera device may also support additional manual sensor controls,
+     * but this capability only covers the above list of controls.</p>
+     * <p>If this is supported, android.scaler.streamConfigurationMap will
+     * additionally return a min frame duration that is greater than
+     * zero for each supported size-format combination.</p>
+     *
+     * @see ACAMERA_BLACK_LEVEL_LOCK
+     * @see ACAMERA_CONTROL_AE_LOCK
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     * @see ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE
+     * @see ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION
+     * @see ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR             = 1,
+
+    /**
+     * <p>The camera device post-processing stages can be manually controlled.
+     * The camera device supports basic manual control of the image post-processing
+     * stages. This means the following controls are guaranteed to be supported:</p>
+     * <ul>
+     * <li>
+     * <p>Manual tonemap control</p>
+     * <ul>
+     * <li>android.tonemap.curve</li>
+     * <li>ACAMERA_TONEMAP_MODE</li>
+     * <li>ACAMERA_TONEMAP_MAX_CURVE_POINTS</li>
+     * <li>ACAMERA_TONEMAP_GAMMA</li>
+     * <li>ACAMERA_TONEMAP_PRESET_CURVE</li>
+     * </ul>
+     * </li>
+     * <li>
+     * <p>Manual white balance control</p>
+     * <ul>
+     * <li>ACAMERA_COLOR_CORRECTION_TRANSFORM</li>
+     * <li>ACAMERA_COLOR_CORRECTION_GAINS</li>
+     * </ul>
+     * </li>
+     * <li>Manual lens shading map control<ul>
+     * <li>ACAMERA_SHADING_MODE</li>
+     * <li>ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE</li>
+     * <li>ACAMERA_STATISTICS_LENS_SHADING_MAP</li>
+     * <li>ACAMERA_LENS_INFO_SHADING_MAP_SIZE</li>
+     * </ul>
+     * </li>
+     * <li>Manual aberration correction control (if aberration correction is supported)<ul>
+     * <li>ACAMERA_COLOR_CORRECTION_ABERRATION_MODE</li>
+     * <li>ACAMERA_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES</li>
+     * </ul>
+     * </li>
+     * <li>Auto white balance lock<ul>
+     * <li>ACAMERA_CONTROL_AWB_LOCK</li>
+     * </ul>
+     * </li>
+     * </ul>
+     * <p>If auto white balance is enabled, then the camera device
+     * will accurately report the values applied by AWB in the result.</p>
+     * <p>A given camera device may also support additional post-processing
+     * controls, but this capability only covers the above list of controls.</p>
+     *
+     * @see ACAMERA_COLOR_CORRECTION_ABERRATION_MODE
+     * @see ACAMERA_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES
+     * @see ACAMERA_COLOR_CORRECTION_GAINS
+     * @see ACAMERA_COLOR_CORRECTION_TRANSFORM
+     * @see ACAMERA_CONTROL_AWB_LOCK
+     * @see ACAMERA_LENS_INFO_SHADING_MAP_SIZE
+     * @see ACAMERA_SHADING_MODE
+     * @see ACAMERA_STATISTICS_LENS_SHADING_MAP
+     * @see ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE
+     * @see ACAMERA_TONEMAP_GAMMA
+     * @see ACAMERA_TONEMAP_MAX_CURVE_POINTS
+     * @see ACAMERA_TONEMAP_MODE
+     * @see ACAMERA_TONEMAP_PRESET_CURVE
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING    = 2,
+
+    /**
+     * <p>The camera device supports outputting RAW buffers and
+     * metadata for interpreting them.</p>
+     * <p>Devices supporting the RAW capability allow both for
+     * saving DNG files, and for direct application processing of
+     * raw sensor images.</p>
+     * <ul>
+     * <li>RAW_SENSOR is supported as an output format.</li>
+     * <li>The maximum available resolution for RAW_SENSOR streams
+     *   will match either the value in
+     *   ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE or
+     *   ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE.</li>
+     * <li>All DNG-related optional metadata entries are provided
+     *   by the camera device.</li>
+     * </ul>
+     *
+     * @see ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE
+     * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_RAW                       = 3,
+
+    /**
+     * <p>The camera device supports accurately reporting the sensor settings for many of
+     * the sensor controls while the built-in 3A algorithm is running.  This allows
+     * reporting of sensor settings even when these settings cannot be manually changed.</p>
+     * <p>The values reported for the following controls are guaranteed to be available
+     * in the CaptureResult, including when 3A is enabled:</p>
+     * <ul>
+     * <li>Exposure control<ul>
+     * <li>ACAMERA_SENSOR_EXPOSURE_TIME</li>
+     * </ul>
+     * </li>
+     * <li>Sensitivity control<ul>
+     * <li>ACAMERA_SENSOR_SENSITIVITY</li>
+     * </ul>
+     * </li>
+     * <li>Lens controls (if the lens is adjustable)<ul>
+     * <li>ACAMERA_LENS_FOCUS_DISTANCE</li>
+     * <li>ACAMERA_LENS_APERTURE</li>
+     * </ul>
+     * </li>
+     * </ul>
+     * <p>This capability is a subset of the MANUAL_SENSOR control capability, and will
+     * always be included if the MANUAL_SENSOR capability is available.</p>
+     *
+     * @see ACAMERA_LENS_APERTURE
+     * @see ACAMERA_LENS_FOCUS_DISTANCE
+     * @see ACAMERA_SENSOR_EXPOSURE_TIME
+     * @see ACAMERA_SENSOR_SENSITIVITY
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS      = 5,
+
+    /**
+     * <p>The camera device supports capturing high-resolution images at &gt;= 20 frames per
+     * second, in at least the uncompressed YUV format, when post-processing settings are set
+     * to FAST. Additionally, maximum-resolution images can be captured at &gt;= 10 frames
+     * per second.  Here, 'high resolution' means at least 8 megapixels, or the maximum
+     * resolution of the device, whichever is smaller.</p>
+     * <p>More specifically, this means that at least one output {@link
+     * AIMAGE_FORMAT_YUV_420_888} size listed in
+     * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS} is larger than or equal to the
+     * 'high resolution' defined above, and can be captured at at least 20 fps.
+     * For the largest {@link AIMAGE_FORMAT_YUV_420_888} size listed in
+     * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}, the camera device can
+     * capture this size at a rate of at least 10 frames per second.
+     * Also the ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES entry lists at least one FPS range
+     * where the minimum FPS is &gt;= 1 / minimumFrameDuration for the largest YUV_420_888 size.</p>
+     * <p>If the device supports the {@link AIMAGE_FORMAT_RAW10} or {@link
+     * AIMAGE_FORMAT_RAW12} formats, then those can also be captured at the same
+     * rate as the maximum-size YUV_420_888 resolution.</p>
+     * <p>In addition, the ACAMERA_SYNC_MAX_LATENCY field is guaranteed to have a value between 0
+     * and 4, inclusive. ACAMERA_CONTROL_AE_LOCK_AVAILABLE and ACAMERA_CONTROL_AWB_LOCK_AVAILABLE
+     * are also guaranteed to be <code>true</code> so burst capture with these two locks ON yields
+     * consistent image output.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES
+     * @see ACAMERA_CONTROL_AE_LOCK_AVAILABLE
+     * @see ACAMERA_CONTROL_AWB_LOCK_AVAILABLE
+     * @see ACAMERA_SYNC_MAX_LATENCY
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE             = 6,
+
+    /**
+     * <p>The camera device can produce depth measurements from its field of view.</p>
+     * <p>This capability requires the camera device to support the following:</p>
+     * <ul>
+     * <li>{@link AIMAGE_FORMAT_DEPTH16} is supported as an output format.</li>
+     * <li>{@link AIMAGE_FORMAT_DEPTH_POINT_CLOUD} is optionally supported as an
+     *   output format.</li>
+     * <li>This camera device, and all camera devices with the same ACAMERA_LENS_FACING,
+     *   will list the following calibration entries in {@link ACameraMetadata} from both
+     *   {@link ACameraManager_getCameraCharacteristics} and
+     *   {@link ACameraCaptureSession_captureCallback_result}:<ul>
+     * <li>ACAMERA_LENS_POSE_TRANSLATION</li>
+     * <li>ACAMERA_LENS_POSE_ROTATION</li>
+     * <li>ACAMERA_LENS_INTRINSIC_CALIBRATION</li>
+     * <li>ACAMERA_LENS_RADIAL_DISTORTION</li>
+     * </ul>
+     * </li>
+     * <li>The ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE entry is listed by this device.</li>
+     * <li>A LIMITED camera with only the DEPTH_OUTPUT capability does not have to support
+     *   normal YUV_420_888, JPEG, and PRIV-format outputs. It only has to support the DEPTH16
+     *   format.</li>
+     * </ul>
+     * <p>Generally, depth output operates at a slower frame rate than standard color capture,
+     * so the DEPTH16 and DEPTH_POINT_CLOUD formats will commonly have a stall duration that
+     * should be accounted for (see
+     * {@link ACAMERA_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS}).
+     * On a device that supports both depth and color-based output, to enable smooth preview,
+     * using a repeating burst is recommended, where a depth-output target is only included
+     * once every N frames, where N is the ratio between preview output rate and depth output
+     * rate, including depth stall time.</p>
+     *
+     * @see ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE
+     * @see ACAMERA_LENS_FACING
+     * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
+     * @see ACAMERA_LENS_POSE_ROTATION
+     * @see ACAMERA_LENS_POSE_TRANSLATION
+     * @see ACAMERA_LENS_RADIAL_DISTORTION
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT              = 8,
+
+} acamera_metadata_enum_android_request_available_capabilities_t;
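+
+// Illustrative sketch: scanning the capability list for MANUAL_SENSOR before
+// attempting manual exposure control (assumes a characteristics
+// ACameraMetadata* named "characteristics" and ACameraMetadata_getConstEntry()).
+//
+//     ACameraMetadata_const_entry caps;
+//     if (ACameraMetadata_getConstEntry(characteristics,
+//             ACAMERA_REQUEST_AVAILABLE_CAPABILITIES, &caps) == ACAMERA_OK) {
+//         for (uint32_t i = 0; i < caps.count; i++) {
+//             if (caps.data.u8[i] ==
+//                     ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR) {
+//                 // ACAMERA_SENSOR_EXPOSURE_TIME etc. may be set manually.
+//             }
+//         }
+//     }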
+
+
+// ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
+typedef enum acamera_metadata_enum_acamera_scaler_available_stream_configurations {
+    ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT            = 0,
+
+    ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT             = 1,
+
+} acamera_metadata_enum_android_scaler_available_stream_configurations_t;
+
+// ACAMERA_SCALER_CROPPING_TYPE
+typedef enum acamera_metadata_enum_acamera_scaler_cropping_type {
+    /**
+     * <p>The camera device only supports centered crop regions.</p>
+     */
+    ACAMERA_SCALER_CROPPING_TYPE_CENTER_ONLY                         = 0,
+
+    /**
+     * <p>The camera device supports arbitrarily chosen crop regions.</p>
+     */
+    ACAMERA_SCALER_CROPPING_TYPE_FREEFORM                            = 1,
+
+} acamera_metadata_enum_android_scaler_cropping_type_t;
+
+
+// ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
+typedef enum acamera_metadata_enum_acamera_sensor_reference_illuminant1 {
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT                    = 1,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_FLUORESCENT                 = 2,
+
+    /**
+     * <p>Incandescent light</p>
+     */
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_TUNGSTEN                    = 3,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_FLASH                       = 4,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_FINE_WEATHER                = 9,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_CLOUDY_WEATHER              = 10,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_SHADE                       = 11,
+
+    /**
+     * <p>D 5700 - 7100K</p>
+     */
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT_FLUORESCENT        = 12,
+
+    /**
+     * <p>N 4600 - 5400K</p>
+     */
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_DAY_WHITE_FLUORESCENT       = 13,
+
+    /**
+     * <p>W 3900 - 4500K</p>
+     */
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_COOL_WHITE_FLUORESCENT      = 14,
+
+    /**
+     * <p>WW 3200 - 3700K</p>
+     */
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_WHITE_FLUORESCENT           = 15,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_A                  = 17,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_B                  = 18,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_C                  = 19,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_D55                         = 20,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_D65                         = 21,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_D75                         = 22,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_D50                         = 23,
+
+    ACAMERA_SENSOR_REFERENCE_ILLUMINANT1_ISO_STUDIO_TUNGSTEN         = 24,
+
+} acamera_metadata_enum_android_sensor_reference_illuminant1_t;
+
+// ACAMERA_SENSOR_TEST_PATTERN_MODE
+typedef enum acamera_metadata_enum_acamera_sensor_test_pattern_mode {
+    /**
+     * <p>No test pattern mode is used, and the camera
+     * device returns captures from the image sensor.</p>
+     * <p>This is the default if the key is not set.</p>
+     */
+    ACAMERA_SENSOR_TEST_PATTERN_MODE_OFF                             = 0,
+
+    /**
+     * <p>Each pixel in <code>[R, G_even, G_odd, B]</code> is replaced by its
+     * respective color channel provided in
+     * ACAMERA_SENSOR_TEST_PATTERN_DATA.</p>
+     * <p>For example:</p>
+     * <pre><code>android.testPatternData = [0, 0xFFFFFFFF, 0xFFFFFFFF, 0]
+     * </code></pre>
+     * <p>All green pixels are 100% green. All red/blue pixels are black.</p>
+     * <pre><code>android.testPatternData = [0xFFFFFFFF, 0, 0xFFFFFFFF, 0]
+     * </code></pre>
+     * <p>All red pixels are 100% red. Only the odd green pixels
+     * are 100% green. All blue pixels are 100% black.</p>
+     *
+     * @see ACAMERA_SENSOR_TEST_PATTERN_DATA
+     */
+    ACAMERA_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR                     = 1,
+
+    /**
+     * <p>All pixel data is replaced with an 8-bar color pattern.</p>
+     * <p>The vertical bars (left-to-right) are as follows:</p>
+     * <ul>
+     * <li>100% white</li>
+     * <li>yellow</li>
+     * <li>cyan</li>
+     * <li>green</li>
+     * <li>magenta</li>
+     * <li>red</li>
+     * <li>blue</li>
+     * <li>black</li>
+     * </ul>
+     * <p>In general the image would look like the following:</p>
+     * <pre><code>W Y C G M R B K
+     * W Y C G M R B K
+     * W Y C G M R B K
+     * W Y C G M R B K
+     * W Y C G M R B K
+     * . . . . . . . .
+     * . . . . . . . .
+     * . . . . . . . .
+     *
+     * (B = Blue, K = Black)
+     * </code></pre>
+     * <p>Each bar should take up 1/8 of the sensor pixel array width.
+     * When this is not possible, the bar size should be rounded
+     * down to the nearest integer and the pattern can repeat
+     * on the right side.</p>
+     * <p>Each bar's height must always take up the full sensor
+     * pixel array height.</p>
+     * <p>Each pixel in this test pattern must be set to either
+     * 0% intensity or 100% intensity.</p>
+     */
+    ACAMERA_SENSOR_TEST_PATTERN_MODE_COLOR_BARS                      = 2,
+
+    /**
+     * <p>The test pattern is similar to COLOR_BARS, except that
+     * each bar should start at its specified color at the top,
+     * and fade to gray at the bottom.</p>
+     * <p>Furthermore each bar is further subdivided into a left and
+     * right half. The left half should have a smooth gradient,
+     * and the right half should have a quantized gradient.</p>
+     * <p>In particular, the right half should consist of blocks of the
+     * same color, each 1/16th of the active sensor pixel array width.</p>
+     * <p>The least significant bits in the quantized gradient should
+     * be copied from the most significant bits of the smooth gradient.</p>
+     * <p>The height of each bar should always be a multiple of 128.
+     * When this is not the case, the pattern should repeat at the bottom
+     * of the image.</p>
+     */
+    ACAMERA_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY         = 3,
+
+    /**
+     * <p>All pixel data is replaced by a pseudo-random sequence
+     * generated from a PN9 512-bit sequence (typically implemented
+     * in hardware with a linear feedback shift register).</p>
+     * <p>The generator should be reset at the beginning of each frame,
+     * and thus each subsequent raw frame with this test pattern should
+     * be exactly the same as the last.</p>
+     */
+    ACAMERA_SENSOR_TEST_PATTERN_MODE_PN9                             = 4,
+
+    /**
+     * <p>The first custom test pattern. All custom patterns that are
+     * available only on this camera device are at least this numeric
+     * value.</p>
+     * <p>All of the custom test patterns will be static
+     * (that is, the raw image must not vary from frame to frame).</p>
+     */
+    ACAMERA_SENSOR_TEST_PATTERN_MODE_CUSTOM1                         = 256,
+
+} acamera_metadata_enum_android_sensor_test_pattern_mode_t;
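+
+// Illustrative sketch: requesting an all-green solid-color test frame. Both
+// ACAMERA_SENSOR_TEST_PATTERN_MODE and ACAMERA_SENSOR_TEST_PATTERN_DATA are
+// int32 entries, so ACaptureRequest_setEntry_i32() is assumed here.
+//
+//     int32_t mode = ACAMERA_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR;
+//     int32_t data[4] = {0, (int32_t)0xFFFFFFFF, (int32_t)0xFFFFFFFF, 0};
+//     ACaptureRequest_setEntry_i32(request, ACAMERA_SENSOR_TEST_PATTERN_MODE,
+//             1, &mode);
+//     ACaptureRequest_setEntry_i32(request, ACAMERA_SENSOR_TEST_PATTERN_DATA,
+//             4, data);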
+
+
+// ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
+typedef enum acamera_metadata_enum_acamera_sensor_info_color_filter_arrangement {
+    ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB                = 0,
+
+    ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GRBG                = 1,
+
+    ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GBRG                = 2,
+
+    ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_BGGR                = 3,
+
+    /**
+     * <p>Sensor is not Bayer; output has 3 16-bit
+     * values for each pixel, instead of just 1 16-bit value
+     * per pixel.</p>
+     */
+    ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGB                 = 4,
+
+} acamera_metadata_enum_android_sensor_info_color_filter_arrangement_t;
+
+// ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE
+typedef enum acamera_metadata_enum_acamera_sensor_info_timestamp_source {
+    /**
+     * <p>Timestamps from ACAMERA_SENSOR_TIMESTAMP are in nanoseconds and monotonic,
+     * but cannot be compared to timestamps from other subsystems
+     * (e.g. accelerometer, gyro etc.), or other instances of the same or different
+     * camera devices in the same system. Timestamps between streams and results for
+     * a single camera instance are comparable, and the timestamps for all buffers
+     * and the result metadata generated by a single capture are identical.</p>
+     *
+     * @see ACAMERA_SENSOR_TIMESTAMP
+     */
+    ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN                     = 0,
+
+    /**
+     * <p>Timestamps from ACAMERA_SENSOR_TIMESTAMP are in the same timebase as
+     * <a href="https://developer.android.com/reference/android/os/SystemClock.html#elapsedRealtimeNanos">elapsedRealtimeNanos</a>
+     * (or CLOCK_BOOTTIME), and they can be compared to other timestamps using that base.</p>
+     *
+     * @see ACAMERA_SENSOR_TIMESTAMP
+     */
+    ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME                    = 1,
+
+} acamera_metadata_enum_android_sensor_info_timestamp_source_t;
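+
+// Illustrative sketch: sensor timestamps are only comparable to
+// CLOCK_BOOTTIME when the source is REALTIME (assumes <time.h> and a
+// characteristics ACameraMetadata* named "characteristics").
+//
+//     ACameraMetadata_const_entry src;
+//     if (ACameraMetadata_getConstEntry(characteristics,
+//             ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE, &src) == ACAMERA_OK &&
+//             src.data.u8[0] == ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME) {
+//         struct timespec ts;
+//         clock_gettime(CLOCK_BOOTTIME, &ts);
+//         int64_t nowNs = (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
+//         // nowNs shares a timebase with the result's ACAMERA_SENSOR_TIMESTAMP.
+//     }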
+
+// ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED
+typedef enum acamera_metadata_enum_acamera_sensor_info_lens_shading_applied {
+    ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED_FALSE                   = 0,
+
+    ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED_TRUE                    = 1,
+
+} acamera_metadata_enum_android_sensor_info_lens_shading_applied_t;
+
+
+// ACAMERA_SHADING_MODE
+typedef enum acamera_metadata_enum_acamera_shading_mode {
+    /**
+     * <p>No lens shading correction is applied.</p>
+     */
+    ACAMERA_SHADING_MODE_OFF                                         = 0,
+
+    /**
+     * <p>Apply lens shading corrections, without slowing
+     * frame rate relative to sensor raw output.</p>
+     */
+    ACAMERA_SHADING_MODE_FAST                                        = 1,
+
+    /**
+     * <p>Apply high-quality lens shading correction, at the
+     * cost of possibly reduced frame rate.</p>
+     */
+    ACAMERA_SHADING_MODE_HIGH_QUALITY                                = 2,
+
+} acamera_metadata_enum_android_shading_mode_t;
+
+
+// ACAMERA_STATISTICS_FACE_DETECT_MODE
+typedef enum acamera_metadata_enum_acamera_statistics_face_detect_mode {
+    /**
+     * <p>Do not include face detection statistics in capture
+     * results.</p>
+     */
+    ACAMERA_STATISTICS_FACE_DETECT_MODE_OFF                          = 0,
+
+    /**
+     * <p>Return face rectangle and confidence values only.</p>
+     */
+    ACAMERA_STATISTICS_FACE_DETECT_MODE_SIMPLE                       = 1,
+
+    /**
+     * <p>Return all face
+     * metadata.</p>
+     * <p>In this mode, face rectangles, scores, landmarks, and face IDs are all valid.</p>
+     */
+    ACAMERA_STATISTICS_FACE_DETECT_MODE_FULL                         = 2,
+
+} acamera_metadata_enum_android_statistics_face_detect_mode_t;
+
+// ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE
+typedef enum acamera_metadata_enum_acamera_statistics_hot_pixel_map_mode {
+    /**
+     * <p>Hot pixel map production is disabled.</p>
+     */
+    ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE_OFF                        = 0,
+
+    /**
+     * <p>Hot pixel map production is enabled.</p>
+     */
+    ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE_ON                         = 1,
+
+} acamera_metadata_enum_android_statistics_hot_pixel_map_mode_t;
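+
+// Illustrative sketch: enabling hot pixel map production for a capture; when
+// ON, the map is returned in ACAMERA_STATISTICS_HOT_PIXEL_MAP of the result
+// (assumes ACaptureRequest_setEntry_u8() and an ACaptureRequest* "request").
+//
+//     uint8_t on = ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE_ON;
+//     ACaptureRequest_setEntry_u8(request,
+//             ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE, 1, &on);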
+
+// ACAMERA_STATISTICS_SCENE_FLICKER
+typedef enum acamera_metadata_enum_acamera_statistics_scene_flicker {
+    /**
+     * <p>The camera device does not detect any flickering illumination
+     * in the current scene.</p>
+     */
+    ACAMERA_STATISTICS_SCENE_FLICKER_NONE                            = 0,
+
+    /**
+     * <p>The camera device detects illumination flickering at 50Hz
+     * in the current scene.</p>
+     */
+    ACAMERA_STATISTICS_SCENE_FLICKER_50HZ                            = 1,
+
+    /**
+     * <p>The camera device detects illumination flickering at 60Hz
+     * in the current scene.</p>
+     */
+    ACAMERA_STATISTICS_SCENE_FLICKER_60HZ                            = 2,
+
+} acamera_metadata_enum_android_statistics_scene_flicker_t;
+
+// ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE
+typedef enum acamera_metadata_enum_acamera_statistics_lens_shading_map_mode {
+    /**
+     * <p>Do not include a lens shading map in the capture result.</p>
+     */
+    ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE_OFF                     = 0,
+
+    /**
+     * <p>Include a lens shading map in the capture result.</p>
+     */
+    ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE_ON                      = 1,
+
+} acamera_metadata_enum_android_statistics_lens_shading_map_mode_t;
+
+
+
+// ACAMERA_TONEMAP_MODE
+typedef enum acamera_metadata_enum_acamera_tonemap_mode {
+    /**
+     * <p>Use the tone mapping curve specified in
+     * the ACAMERA_TONEMAPCURVE_* entries.</p>
+     * <p>All color enhancement and tonemapping must be disabled, except
+     * for applying the tonemapping curve specified by
+     * android.tonemap.curve.</p>
+     * <p>Must not slow down frame rate relative to raw
+     * sensor output.</p>
+     */
+    ACAMERA_TONEMAP_MODE_CONTRAST_CURVE                              = 0,
+
+    /**
+     * <p>Advanced gamma mapping and color enhancement may be applied, without
+     * reducing frame rate compared to raw sensor output.</p>
+     */
+    ACAMERA_TONEMAP_MODE_FAST                                        = 1,
+
+    /**
+     * <p>High-quality gamma mapping and color enhancement will be applied, at
+     * the cost of possibly reduced frame rate compared to raw sensor output.</p>
+     */
+    ACAMERA_TONEMAP_MODE_HIGH_QUALITY                                = 2,
+
+    /**
+     * <p>Use the gamma value specified in ACAMERA_TONEMAP_GAMMA to perform
+     * tonemapping.</p>
+     * <p>All color enhancement and tonemapping must be disabled, except
+     * for applying the tonemapping curve specified by ACAMERA_TONEMAP_GAMMA.</p>
+     * <p>Must not slow down frame rate relative to raw sensor output.</p>
+     *
+     * @see ACAMERA_TONEMAP_GAMMA
+     */
+    ACAMERA_TONEMAP_MODE_GAMMA_VALUE                                 = 3,
+
+    /**
+     * <p>Use the preset tonemapping curve specified in
+     * ACAMERA_TONEMAP_PRESET_CURVE to perform tonemapping.</p>
+     * <p>All color enhancement and tonemapping must be disabled, except
+     * for applying the tonemapping curve specified by
+     * ACAMERA_TONEMAP_PRESET_CURVE.</p>
+     * <p>Must not slow down frame rate relative to raw sensor output.</p>
+     *
+     * @see ACAMERA_TONEMAP_PRESET_CURVE
+     */
+    ACAMERA_TONEMAP_MODE_PRESET_CURVE                                = 4,
+
+} acamera_metadata_enum_android_tonemap_mode_t;
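+
+// Illustrative sketch: applying a plain gamma curve instead of a full
+// tonemap curve; ACAMERA_TONEMAP_GAMMA is a float entry, so
+// ACaptureRequest_setEntry_float() is assumed here.
+//
+//     uint8_t mode = ACAMERA_TONEMAP_MODE_GAMMA_VALUE;
+//     float gamma = 2.2f;
+//     ACaptureRequest_setEntry_u8(request, ACAMERA_TONEMAP_MODE, 1, &mode);
+//     ACaptureRequest_setEntry_float(request, ACAMERA_TONEMAP_GAMMA, 1, &gamma);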
+
+// ACAMERA_TONEMAP_PRESET_CURVE
+typedef enum acamera_metadata_enum_acamera_tonemap_preset_curve {
+    /**
+     * <p>Tonemapping curve is defined by sRGB.</p>
+     */
+    ACAMERA_TONEMAP_PRESET_CURVE_SRGB                                = 0,
+
+    /**
+     * <p>Tonemapping curve is defined by ITU-R BT.709.</p>
+     */
+    ACAMERA_TONEMAP_PRESET_CURVE_REC709                              = 1,
+
+} acamera_metadata_enum_android_tonemap_preset_curve_t;
+
+
+
+// ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL
+typedef enum acamera_metadata_enum_acamera_info_supported_hardware_level {
+    /**
+     * <p>This camera device does not have enough capabilities to qualify as a <code>FULL</code> device or
+     * better.</p>
+     * <p>Only the stream configurations listed in the <code>LEGACY</code> and <code>LIMITED</code> tables in the
+     * {@link ACameraDevice_createCaptureSession} documentation are guaranteed to be supported.</p>
+     * <p>All <code>LIMITED</code> devices support the <code>BACKWARD_COMPATIBLE</code> capability, indicating basic
+     * support for color image capture. The only exception is that the device may
+     * alternatively support only the <code>DEPTH_OUTPUT</code> capability, if it can only output depth
+     * measurements and not color images.</p>
+     * <p><code>LIMITED</code> devices and above require the use of ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * to lock exposure metering (and calculate flash power, for cameras with flash) before
+     * capturing a high-quality still image.</p>
+     * <p>A <code>LIMITED</code> device that only lists the <code>BACKWARD_COMPATIBLE</code> capability is only
+     * required to support full-automatic operation and post-processing (<code>OFF</code> is not
+     * supported for ACAMERA_CONTROL_AE_MODE, ACAMERA_CONTROL_AF_MODE, or
+     * ACAMERA_CONTROL_AWB_MODE).</p>
+     * <p>Additional capabilities may optionally be supported by a <code>LIMITED</code>-level device, and
+     * can be checked for in ACAMERA_REQUEST_AVAILABLE_CAPABILITIES.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_MODE
+     * @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * @see ACAMERA_CONTROL_AF_MODE
+     * @see ACAMERA_CONTROL_AWB_MODE
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     */
+    ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED                    = 0,
+
+    /**
+     * <p>This camera device is capable of supporting advanced imaging applications.</p>
+     * <p>The stream configurations listed in the <code>FULL</code>, <code>LEGACY</code> and <code>LIMITED</code> tables in the
+     * {@link ACameraDevice_createCaptureSession} documentation are guaranteed to be supported.</p>
+     * <p>A <code>FULL</code> device will support the following capabilities:</p>
+     * <ul>
+     * <li><code>BURST_CAPTURE</code> capability (ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains
+     *   <code>BURST_CAPTURE</code>)</li>
+     * <li>Per frame control (ACAMERA_SYNC_MAX_LATENCY <code>==</code> PER_FRAME_CONTROL)</li>
+     * <li>Manual sensor control (ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains <code>MANUAL_SENSOR</code>)</li>
+     * <li>Manual post-processing control (ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains
+     *   <code>MANUAL_POST_PROCESSING</code>)</li>
+     * <li>The required exposure time range defined in ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE</li>
+     * <li>The required maxFrameDuration defined in ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION</li>
+     * </ul>
+     * <p>Note:
+     * Pre-API level 23, FULL devices also supported arbitrary cropping regions
+     * (ACAMERA_SCALER_CROPPING_TYPE <code>== FREEFORM</code>); this requirement was relaxed in API level
+     * 23, and <code>FULL</code> devices may only support <code>CENTERED</code> cropping.</p>
+     *
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     * @see ACAMERA_SCALER_CROPPING_TYPE
+     * @see ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE
+     * @see ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION
+     * @see ACAMERA_SYNC_MAX_LATENCY
+     */
+    ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_FULL                       = 1,
+
+    /**
+     * <p>This camera device is running in backward compatibility mode.</p>
+     * <p>Only the stream configurations listed in the <code>LEGACY</code> table in the {@link
+     * ACameraDevice_createCaptureSession} documentation are supported.</p>
+     * <p>A <code>LEGACY</code> device does not support per-frame control, manual sensor control, manual
+     * post-processing, or arbitrary cropping regions, and it has relaxed performance constraints.
+     * No additional capabilities beyond <code>BACKWARD_COMPATIBLE</code> will ever be listed by a
+     * <code>LEGACY</code> device in ACAMERA_REQUEST_AVAILABLE_CAPABILITIES.</p>
+     * <p>In addition, the ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is not functional on <code>LEGACY</code>
+     * devices. Instead, every request that includes a JPEG-format output target is treated
+     * as triggering a still capture, internally executing a precapture trigger.  This may
+     * fire the flash for flash power metering during precapture, and then fire the flash
+     * for the final capture, if a flash is available on the device and the AE mode is set to
+     * enable the flash.</p>
+     *
+     * @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     */
+    ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY                     = 2,
+
+    /**
+     * <p>This camera device is capable of YUV reprocessing and RAW data capture, in addition to
+     * FULL-level capabilities.</p>
+     * <p>The stream configurations listed in the <code>LEVEL_3</code>, <code>RAW</code>, <code>FULL</code>, <code>LEGACY</code> and
+     * <code>LIMITED</code> tables in the {@link ACameraDevice_createCaptureSession}
+     * documentation are guaranteed to be supported.</p>
+     * <p>The following additional capabilities are guaranteed to be supported:</p>
+     * <ul>
+     * <li><code>YUV_REPROCESSING</code> capability (ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains
+     *   <code>YUV_REPROCESSING</code>)</li>
+     * <li><code>RAW</code> capability (ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains
+     *   <code>RAW</code>)</li>
+     * </ul>
+     *
+     * @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
+     */
+    ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_3                          = 3,
+
+} acamera_metadata_enum_android_info_supported_hardware_level_t;
+
+
+// ACAMERA_BLACK_LEVEL_LOCK
+typedef enum acamera_metadata_enum_acamera_black_level_lock {
+    ACAMERA_BLACK_LEVEL_LOCK_OFF                                     = 0,
+
+    ACAMERA_BLACK_LEVEL_LOCK_ON                                      = 1,
+
+} acamera_metadata_enum_android_black_level_lock_t;
+
+
+// ACAMERA_SYNC_FRAME_NUMBER
+typedef enum acamera_metadata_enum_acamera_sync_frame_number {
+    /**
+     * <p>The current result is not yet fully synchronized to any request.</p>
+     * <p>Synchronization is in progress, and reading metadata from this
+     * result may include a mix of data that have taken effect since the
+     * last synchronization time.</p>
+     * <p>In some future result, within ACAMERA_SYNC_MAX_LATENCY frames,
+     * this value will update to the actual frame number
+     * the result is guaranteed to be synchronized to (as long as the
+     * request settings remain constant).</p>
+     *
+     * @see ACAMERA_SYNC_MAX_LATENCY
+     */
+    ACAMERA_SYNC_FRAME_NUMBER_CONVERGING                             = -1,
+
+    /**
+     * <p>The current result's synchronization status is unknown.</p>
+     * <p>The result may have already converged, or it may be in
+     * progress.  Reading from this result may include some mix
+     * of settings from past requests.</p>
+     * <p>After a settings change, the new settings will eventually all
+     * take effect for the output buffers and results. However, this
+     * value will not change when that happens. Altering settings
+     * rapidly may provide outcomes using mixes of settings from recent
+     * requests.</p>
+     * <p>This value is intended primarily for backwards compatibility with
+     * the older camera implementations (for android.hardware.Camera).</p>
+     */
+    ACAMERA_SYNC_FRAME_NUMBER_UNKNOWN                                = -2,
+
+} acamera_metadata_enum_android_sync_frame_number_t;
+
+// ACAMERA_SYNC_MAX_LATENCY
+typedef enum acamera_metadata_enum_acamera_sync_max_latency {
+    /**
+     * <p>Every frame has the requests immediately applied.</p>
+     * <p>Changing controls over multiple requests one after another will
+     * produce results that have those controls applied atomically
+     * each frame.</p>
+     * <p>All FULL capability devices will have this as their maxLatency.</p>
+     */
+    ACAMERA_SYNC_MAX_LATENCY_PER_FRAME_CONTROL                       = 0,
+
+    /**
+     * <p>Each new frame has some subset (potentially the entire set)
+     * of the past requests applied to the camera settings.</p>
+     * <p>By submitting a series of identical requests, the camera device
+     * will eventually have the camera settings applied, but it is
+     * unknown when that exact point will be.</p>
+     * <p>All LEGACY capability devices will have this as their maxLatency.</p>
+     */
+    ACAMERA_SYNC_MAX_LATENCY_UNKNOWN                                 = -1,
+
+} acamera_metadata_enum_android_sync_max_latency_t;
+
+
+
+// ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS
+typedef enum acamera_metadata_enum_acamera_depth_available_depth_stream_configurations {
+    ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_OUTPUT       = 0,
+
+    ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_INPUT        = 1,
+
+} acamera_metadata_enum_android_depth_available_depth_stream_configurations_t;
+
+// ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE
+typedef enum acamera_metadata_enum_acamera_depth_depth_is_exclusive {
+    ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE_FALSE                           = 0,
+
+    ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE_TRUE                            = 1,
+
+} acamera_metadata_enum_android_depth_depth_is_exclusive_t;
+
+
+
+#endif //_NDK_CAMERA_METADATA_TAGS_H
+
+/** @} */
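
A minimal usage sketch for the hardware-level enum above (not part of this
change): reading ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL from a camera's static
metadata. It assumes an ACameraMetadata* already obtained via
ACameraManager_getCameraCharacteristics() from NdkCameraManager.h; error
handling is abbreviated.

    #include "NdkCameraMetadata.h"
    #include "NdkCameraMetadataTags.h"

    static bool isFullOrBetter(const ACameraMetadata* chars) {
        ACameraMetadata_const_entry entry;
        if (ACameraMetadata_getConstEntry(chars,
                ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL, &entry) != ACAMERA_OK) {
            return false;
        }
        // Note the enum values are not ordered by capability:
        // LIMITED == 0, FULL == 1, LEGACY == 2, LEVEL_3 == 3.
        uint8_t level = entry.data.u8[0];
        return level == ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_FULL
                || level == ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_3;
    }
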
diff --git a/include/camera/ndk/NdkCaptureRequest.h b/include/camera/ndk/NdkCaptureRequest.h
new file mode 100644
index 0000000..cd97f4d
--- /dev/null
+++ b/include/camera/ndk/NdkCaptureRequest.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCaptureRequest.h
+ */
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+#include <android/native_window.h>
+#include "NdkCameraError.h"
+#include "NdkCameraMetadata.h"
+
+#ifndef _NDK_CAPTURE_REQUEST_H
+#define _NDK_CAPTURE_REQUEST_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Container for output targets
+typedef struct ACameraOutputTargets ACameraOutputTargets;
+
+// Container for a single output target
+typedef struct ACameraOutputTarget ACameraOutputTarget;
+
+/**
+ * ACaptureRequest is an opaque type that contains settings and output targets needed to capture
+ * a single image from the camera device.
+ *
+ * <p>ACaptureRequest contains the configuration for the capture hardware (sensor, lens, flash),
+ * the processing pipeline, the control algorithms, and the output buffers. It also
+ * contains the list of target {@link ANativeWindow}s to send image data to for this
+ * capture.</p>
+ *
+ * <p>ACaptureRequest is created by {@link ACameraDevice_createCaptureRequest}.</p>
+ *
+ * <p>ACaptureRequest is given to {@link ACameraCaptureSession_capture} or
+ * {@link ACameraCaptureSession_setRepeatingRequest} to capture images from a camera.</p>
+ *
+ * <p>Each request can specify a different subset of target {@link ANativeWindow}s for the
+ * camera to send the captured data to. All the {@link ANativeWindow}s used in a request must
+ * be part of the {@link ANativeWindow} list given to the last call to
+ * {@link ACameraDevice_createCaptureSession}, when the request is submitted to the
+ * session.</p>
+ *
+ * <p>For example, a request meant for repeating preview might only include the
+ * {@link ANativeWindow} for the preview SurfaceView or SurfaceTexture, while a
+ * high-resolution still capture would also include a {@link ANativeWindow} from a
+ * {@link AImageReader} configured for high-resolution JPEG images.</p>
+ *
+ * @see ACameraDevice_createCaptureRequest
+ * @see ACameraCaptureSession_capture
+ * @see ACameraCaptureSession_setRepeatingRequest
+ */
+typedef struct ACaptureRequest ACaptureRequest;
+
+/**
+ * Create an ACameraOutputTarget object.
+ *
+ * <p>The ACameraOutputTarget is used in the {@link ACaptureRequest_addTarget} method to add an
+ * output {@link ANativeWindow} to ACaptureRequest. Use {@link ACameraOutputTarget_free} to free the
+ * object and its memory after the application no longer needs the {@link ACameraOutputTarget}.</p>
+ *
+ * @param window the {@link ANativeWindow} to be associated with the {@link ACameraOutputTarget}
+ * @param output where the created {@link ACameraOutputTarget} will be stored if the
+ *                  method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds. The created ACameraOutputTarget will
+ *                                be stored in the output argument.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if window or output is NULL.</li></ul>
+ *
+ * @see ACaptureRequest_addTarget
+ */
+camera_status_t ACameraOutputTarget_create(ANativeWindow* window, ACameraOutputTarget** output);
+
+/**
+ * Free an ACameraOutputTarget object.
+ *
+ * @param output the {@link ACameraOutputTarget} to be freed.
+ *
+ * @see ACameraOutputTarget_create
+ */
+void ACameraOutputTarget_free(ACameraOutputTarget* output);
+
+/**
+ * Add an {@link ACameraOutputTarget} object to {@link ACaptureRequest}.
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param output the output {@link ACameraOutputTarget} to be added to the capture request.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or output is NULL.</li></ul>
+ */
+camera_status_t ACaptureRequest_addTarget(ACaptureRequest* request,
+        const ACameraOutputTarget* output);
+
+/**
+ * Remove an {@link ACameraOutputTarget} object from {@link ACaptureRequest}.
+ *
+ * <p>This method has no effect if the ACameraOutputTarget does not exist in ACaptureRequest.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param output the output {@link ACameraOutputTarget} to be removed from the capture request.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or output is NULL.</li></ul>
+ */
+camera_status_t ACaptureRequest_removeTarget(ACaptureRequest* request,
+        const ACameraOutputTarget* output);
+
+/**
+ * Get a metadata entry from input {@link ACaptureRequest}.
+ *
+ * <p>The memory of the data field in returned entry is managed by camera framework. Do not
+ * attempt to free it.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param tag the tag value of the camera metadata entry to query.
+ * @param entry the output {@link ACameraMetadata_const_entry} will be filled here if the method
+ *        call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if metadata or entry is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_METADATA_NOT_FOUND} if the capture request does not contain an
+ *             entry with the input tag value.</li></ul>
+ */
+camera_status_t ACaptureRequest_getConstEntry(
+        const ACaptureRequest* request, uint32_t tag, ACameraMetadata_const_entry* entry);
+
+/**
+ * List all the entry tags in input {@link ACaptureRequest}.
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param numTags number of metadata entries in input {@link ACaptureRequest}
+ * @param tags the tag values of the metadata entries. Length of tags is returned in numTags
+ *             argument. The memory is managed by ACaptureRequest itself and must NOT be
+ *             freed/deleted by the application. Calling ACaptureRequest_setEntry_* methods will
+ *             invalidate previous output of ACaptureRequest_getAllTags. Do not access tags after
+ *             calling ACaptureRequest_setEntry_*. To get the new list of tags after updating the
+ *             capture request, the application must call ACaptureRequest_getAllTags again. Do NOT
+ *             access tags after calling ACaptureRequest_free.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request, numTags or tags is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
+ */
+camera_status_t ACaptureRequest_getAllTags(
+        const ACaptureRequest* request, /*out*/int32_t* numTags, /*out*/const uint32_t** tags);
+
+/**
+ * Set/change a camera capture control entry with unsigned 8 bits data type.
+ *
+ * <p>Set count to 0 and data to NULL to remove a tag from the capture request.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param tag the tag value of the camera metadata entry to be set.
+ * @param count number of elements to be set in the data argument.
+ * @param data the entries to set/change in the capture request.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL, count is larger than
+ *             zero while data is NULL, the data type of the tag is not unsigned 8 bits, or
+ *             the tag is not controllable by application.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_u8(
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const uint8_t* data);
+
+/**
+ * Set/change a camera capture control entry with signed 32 bits data type.
+ *
+ * <p>Set count to 0 and data to NULL to remove a tag from the capture request.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param tag the tag value of the camera metadata entry to be set.
+ * @param count number of elements to be set in the data argument.
+ * @param data the entries to set/change in the capture request.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL, count is larger than
+ *             zero while data is NULL, the data type of the tag is not signed 32 bits, or
+ *             the tag is not controllable by application.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_i32(
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const int32_t* data);
+
+/**
+ * Set/change a camera capture control entry with float data type.
+ *
+ * <p>Set count to 0 and data to NULL to remove a tag from the capture request.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param tag the tag value of the camera metadata entry to be set.
+ * @param count number of elements to be set in the data argument.
+ * @param data the entries to set/change in the capture request.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL, count is larger than
+ *             zero while data is NULL, the data type of the tag is not float, or
+ *             the tag is not controllable by application.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_float(
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const float* data);
+
+/**
+ * Set/change a camera capture control entry with signed 64 bits data type.
+ *
+ * <p>Set count to 0 and data to NULL to remove a tag from the capture request.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param tag the tag value of the camera metadata entry to be set.
+ * @param count number of elements to be set in the data argument.
+ * @param data the entries to set/change in the capture request.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL, count is larger than
+ *             zero while data is NULL, the data type of the tag is not signed 64 bits, or
+ *             the tag is not controllable by application.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_i64(
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const int64_t* data);
+
+/**
+ * Set/change a camera capture control entry with double data type.
+ *
+ * <p>Set count to 0 and data to NULL to remove a tag from the capture request.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param tag the tag value of the camera metadata entry to be set.
+ * @param count number of elements to be set in the data argument.
+ * @param data the entries to set/change in the capture request.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL, count is larger than
+ *             zero while data is NULL, the data type of the tag is not double, or
+ *             the tag is not controllable by application.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_double(
+        ACaptureRequest* request, uint32_t tag, uint32_t count, const double* data);
+
+/**
+ * Set/change a camera capture control entry with rational data type.
+ *
+ * <p>Set count to 0 and data to NULL to remove a tag from the capture request.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param tag the tag value of the camera metadata entry to be set.
+ * @param count number of elements to be set in the data argument.
+ * @param data the entries to set/change in the capture request.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL, count is larger than
+ *             zero while data is NULL, the data type of the tag is not rational, or
+ *             the tag is not controllable by application.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_rational(
+        ACaptureRequest* request, uint32_t tag, uint32_t count,
+        const ACameraMetadata_rational* data);
+
+/**
+ * Free a {@link ACaptureRequest} structure.
+ *
+ * @param request the {@link ACaptureRequest} to be freed.
+ */
+void ACaptureRequest_free(ACaptureRequest* request);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // _NDK_CAPTURE_REQUEST_H
+
+/** @} */
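
A minimal end-to-end sketch of the request API declared above (not part of
this change): create a request from a device, attach one output target, and
override one control. ACameraDevice_createCaptureRequest() and
TEMPLATE_PREVIEW are assumed from NdkCameraDevice.h, and
ACAMERA_SENSOR_SENSITIVITY from NdkCameraMetadataTags.h.

    camera_status_t buildPreviewRequest(ACameraDevice* device,
            ANativeWindow* previewWindow,
            ACaptureRequest** outRequest, ACameraOutputTarget** outTarget) {
        camera_status_t status = ACameraDevice_createCaptureRequest(
                device, TEMPLATE_PREVIEW, outRequest);
        if (status != ACAMERA_OK) return status;

        status = ACameraOutputTarget_create(previewWindow, outTarget);
        if (status != ACAMERA_OK) return status;

        status = ACaptureRequest_addTarget(*outRequest, *outTarget);
        if (status != ACAMERA_OK) return status;

        // Override one control: pin sensor sensitivity to ISO 400.
        const int32_t sensitivity = 400;
        return ACaptureRequest_setEntry_i32(
                *outRequest, ACAMERA_SENSOR_SENSITIVITY, 1, &sensitivity);
    }

The caller frees *outRequest with ACaptureRequest_free() and *outTarget with
ACameraOutputTarget_free() once they are no longer needed.
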
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h
index 7be449c..458d170 100644
--- a/include/media/AudioBufferProvider.h
+++ b/include/media/AudioBufferProvider.h
@@ -40,12 +40,6 @@
 
     virtual ~AudioBufferProvider() {}
 
-    // value representing an invalid presentation timestamp
-    static const int64_t kInvalidPTS = 0x7FFFFFFFFFFFFFFFLL;    // <stdint.h> is too painful
-
-    // pts is the local time when the next sample yielded by getNextBuffer
-    // will be rendered.
-    // Pass kInvalidPTS if the PTS is unknown or not applicable.
     // On entry:
     //  buffer              != NULL
     //  buffer->raw         unused
@@ -59,7 +53,7 @@
     //  status              != NO_ERROR
     //  buffer->raw         NULL
     //  buffer->frameCount  0
-    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0;
+    virtual status_t getNextBuffer(Buffer* buffer) = 0;
 
     // Release (a portion of) the buffer previously obtained by getNextBuffer().
     // It is permissible to call releaseBuffer() multiple times per getNextBuffer().
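
With the pts parameter gone, an implementation only fills the buffer fields.
A minimal sketch (not part of this change) of a provider that serves 16-bit
stereo frames from a fixed in-memory buffer; the 2-samples-per-frame stride
is an assumption of the example.

    class StaticBufferProvider : public AudioBufferProvider {
    public:
        StaticBufferProvider(int16_t* data, size_t frames)
            : mData(data), mFrames(frames), mPos(0) {}

        virtual status_t getNextBuffer(Buffer* buffer) {
            if (mPos >= mFrames) {
                buffer->raw = NULL;
                buffer->frameCount = 0;
                return NOT_ENOUGH_DATA;
            }
            size_t avail = mFrames - mPos;
            if (buffer->frameCount == 0 || buffer->frameCount > avail) {
                buffer->frameCount = avail;   // clamp the request to what remains
            }
            buffer->i16 = mData + mPos * 2;   // 2 int16_t samples per stereo frame
            return NO_ERROR;
        }

        virtual void releaseBuffer(Buffer* buffer) {
            mPos += buffer->frameCount;       // consume what the caller used
            buffer->raw = NULL;
            buffer->frameCount = 0;
        }

    private:
        int16_t* const mData;
        const size_t   mFrames;
        size_t         mPos;
    };
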
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 5af6c10..6af1962 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -139,7 +139,7 @@
      *               of descriptors to return.
      *               *count is limited to kMaxPreProcessing on return.
      */
-    static status_t queryDefaultPreProcessing(int audioSession,
+    static status_t queryDefaultPreProcessing(audio_session_t audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count);
 
@@ -237,7 +237,7 @@
                   int32_t priority = 0,
                   effect_callback_t cbf = NULL,
                   void* user = NULL,
-                  int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                  audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
                   audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
                   );
 
@@ -250,7 +250,7 @@
                     int32_t priority = 0,
                     effect_callback_t cbf = NULL,
                     void* user = NULL,
-                    int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                    audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
                     audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
                     );
 
@@ -272,7 +272,7 @@
                             int32_t priority = 0,
                             effect_callback_t cbf = NULL,
                             void* user = NULL,
-                            int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                            audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
                             audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
                             );
 
@@ -405,7 +405,7 @@
 
 protected:
      bool                    mEnabled;           // enable state
-     int32_t                 mSessionId;         // audio session ID
+     audio_session_t         mSessionId;         // audio session ID
      int32_t                 mPriority;          // priority for effect control
      status_t                mStatus;            // effect status
      effect_callback_t       mCbf;               // callback function for status, control and
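
A minimal sketch of the retyped query (not part of this change): callers now
pass an audio_session_t. kMaxPreProcessing and effect_descriptor_t::name are
from the existing AudioEffect API; the logging is illustrative.

    void dumpDefaultPreProcessing(audio_session_t session) {
        effect_descriptor_t descriptors[AudioEffect::kMaxPreProcessing];
        uint32_t count = AudioEffect::kMaxPreProcessing;
        if (AudioEffect::queryDefaultPreProcessing(
                session, descriptors, &count) != NO_ERROR) {
            return;
        }
        for (uint32_t i = 0; i < count; i++) {
            ALOGI("default pre-processing effect: %s", descriptors[i].name);
        }
    }
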
diff --git a/include/media/AudioIoDescriptor.h b/include/media/AudioIoDescriptor.h
index c94b738..fed86c9 100644
--- a/include/media/AudioIoDescriptor.h
+++ b/include/media/AudioIoDescriptor.h
@@ -35,7 +35,7 @@
     AudioIoDescriptor() :
         mIoHandle(AUDIO_IO_HANDLE_NONE),
         mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(AUDIO_CHANNEL_NONE),
-        mFrameCount(0), mLatency(0)
+        mFrameCount(0), mFrameCountHAL(0), mLatency(0)
     {
         memset(&mPatch, 0, sizeof(struct audio_patch));
     }
@@ -56,13 +56,14 @@
         return AUDIO_PORT_HANDLE_NONE;
     }
 
-    audio_io_handle_t mIoHandle;
-    struct audio_patch mPatch;
-    uint32_t mSamplingRate;
-    audio_format_t mFormat;
-    audio_channel_mask_t mChannelMask;
-    size_t mFrameCount;
-    uint32_t mLatency;
+    audio_io_handle_t       mIoHandle;
+    struct audio_patch      mPatch;
+    uint32_t                mSamplingRate;
+    audio_format_t          mFormat;
+    audio_channel_mask_t    mChannelMask;
+    size_t                  mFrameCount;
+    size_t                  mFrameCountHAL;
+    uint32_t                mLatency;   // only valid for output
 };
 
 
diff --git a/include/media/AudioPolicy.h b/include/media/AudioPolicy.h
index feed402..8528c7a 100644
--- a/include/media/AudioPolicy.h
+++ b/include/media/AudioPolicy.h
@@ -28,11 +28,13 @@
 
 // Keep in sync with AudioMix.java, AudioMixingRule.java, AudioPolicyConfig.java
 #define RULE_EXCLUSION_MASK 0x8000
-#define RULE_MATCH_ATTRIBUTE_USAGE 0x1
+#define RULE_MATCH_ATTRIBUTE_USAGE           0x1
 #define RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET (0x1 << 1)
-#define RULE_EXCLUDE_ATTRIBUTE_USAGE (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_USAGE)
+#define RULE_MATCH_UID                      (0x1 << 2)
+#define RULE_EXCLUDE_ATTRIBUTE_USAGE  (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_USAGE)
 #define RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET \
-    (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET)
+                                      (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET)
+#define RULE_EXCLUDE_UID              (RULE_EXCLUSION_MASK|RULE_MATCH_UID)
 
 #define MIX_TYPE_INVALID -1
 #define MIX_TYPE_PLAYERS 0
@@ -49,14 +51,15 @@
 
 #define MIX_ROUTE_FLAG_RENDER 0x1
 #define MIX_ROUTE_FLAG_LOOP_BACK (0x1 << 1)
+#define MIX_ROUTE_FLAG_ALL (MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_LOOP_BACK)
 
 #define MAX_MIXES_PER_POLICY 10
 #define MAX_CRITERIA_PER_MIX 20
 
-class AttributeMatchCriterion {
+class AudioMixMatchCriterion {
 public:
-    AttributeMatchCriterion() {}
-    AttributeMatchCriterion(audio_usage_t usage, audio_source_t source, uint32_t rule);
+    AudioMixMatchCriterion() {}
+    AudioMixMatchCriterion(audio_usage_t usage, audio_source_t source, uint32_t rule);
 
     status_t readFromParcel(Parcel *parcel);
     status_t writeToParcel(Parcel *parcel) const;
@@ -64,7 +67,8 @@
     union {
         audio_usage_t   mUsage;
         audio_source_t  mSource;
-    } mAttr;
+        uid_t           mUid;
+    } mValue;
     uint32_t        mRule;
 };
 
@@ -75,22 +79,30 @@
     static const uint32_t kCbFlagNotifyActivity = 0x1;
 
     AudioMix() {}
-    AudioMix(Vector<AttributeMatchCriterion> criteria, uint32_t mixType, audio_config_t format,
+    AudioMix(Vector<AudioMixMatchCriterion> criteria, uint32_t mixType, audio_config_t format,
              uint32_t routeFlags, String8 registrationId, uint32_t flags) :
         mCriteria(criteria), mMixType(mixType), mFormat(format),
-        mRouteFlags(routeFlags), mRegistrationId(registrationId), mCbFlags(flags){}
+        mRouteFlags(routeFlags), mDeviceAddress(registrationId), mCbFlags(flags){}
 
     status_t readFromParcel(Parcel *parcel);
     status_t writeToParcel(Parcel *parcel) const;
 
-    Vector<AttributeMatchCriterion> mCriteria;
+    Vector<AudioMixMatchCriterion> mCriteria;
     uint32_t        mMixType;
     audio_config_t  mFormat;
     uint32_t        mRouteFlags;
-    String8         mRegistrationId;
+    audio_devices_t mDeviceType;
+    String8         mDeviceAddress;
     uint32_t        mCbFlags; // flags indicating which callbacks to use, see kCbFlag*
 };
 
+
+// definitions for audio recording configuration updates
+// which update type is reported
+#define RECORD_CONFIG_EVENT_NONE -1
+#define RECORD_CONFIG_EVENT_START 1
+#define RECORD_CONFIG_EVENT_STOP  0
+
 }; // namespace android
 
 #endif  // ANDROID_AUDIO_POLICY_H
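
A minimal sketch of the renamed criterion type (not part of this change):
building a loopback mix that matches a single app by UID via the new
RULE_MATCH_UID rule and the mValue union. The registration string is
illustrative.

    AudioMix makeUidLoopbackMix(uid_t appUid, const audio_config_t& format) {
        AudioMixMatchCriterion criterion;
        criterion.mRule = RULE_MATCH_UID;
        criterion.mValue.mUid = appUid;

        Vector<AudioMixMatchCriterion> criteria;
        criteria.add(criterion);
        return AudioMix(criteria, MIX_TYPE_PLAYERS, format,
                MIX_ROUTE_FLAG_LOOP_BACK, String8("uid_loopback"), 0 /*cbFlags*/);
    }
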
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index c4c7b0e..2fa1a4e 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -19,7 +19,9 @@
 
 #include <cutils/sched_policy.h>
 #include <media/AudioSystem.h>
+#include <media/AudioTimestamp.h>
 #include <media/IAudioRecord.h>
+#include <media/Modulo.h>
 #include <utils/threads.h>
 
 namespace android {
@@ -143,7 +145,7 @@
      * Parameters:
      *
      * inputSource:        Select the audio input to record from (e.g. AUDIO_SOURCE_DEFAULT).
-     * sampleRate:         Data sink sampling rate in Hz.
+     * sampleRate:         Data sink sampling rate in Hz.  Zero means to use the source sample rate.
      * format:             Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
      *                     16 bits per sample).
      * channelMask:        Channel mask, such that audio_is_input_channel(channelMask) is true.
@@ -175,7 +177,7 @@
                                     callback_t cbf = NULL,
                                     void* user = NULL,
                                     uint32_t notificationFrames = 0,
-                                    int sessionId = AUDIO_SESSION_ALLOCATE,
+                                    audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
                                     int uid = -1,
@@ -213,7 +215,7 @@
                             void* user = NULL,
                             uint32_t notificationFrames = 0,
                             bool threadCanCallJava = false,
-                            int sessionId = AUDIO_SESSION_ALLOCATE,
+                            audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
                             audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
                             int uid = -1,
@@ -247,7 +249,7 @@
      * the specified event occurs on the specified trigger session.
      */
             status_t    start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
-                              int triggerSession = 0);
+                              audio_session_t triggerSession = AUDIO_SESSION_NONE);
 
     /* Stop a track.  The callback will cease being called.  Note that obtainBuffer() still
      * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK.
@@ -256,6 +258,7 @@
             bool        stopped() const;
 
     /* Return the sink sample rate for this record track in Hz.
+     * If specified as zero in constructor or set(), this will be the source sample rate.
      * Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock.
      */
             uint32_t    getSampleRate() const   { return mSampleRate; }
@@ -313,6 +316,17 @@
      */
             status_t    getPosition(uint32_t *position) const;
 
+    /* Return the record timestamp.
+     *
+     * Parameters:
+     *  timestamp: A pointer to the timestamp to be filled.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *  - NO_ERROR: successful operation
+     *  - BAD_VALUE: timestamp is NULL
+     */
+            status_t getTimestamp(ExtendedTimestamp *timestamp);
+
     /* Returns a handle on the audio input used by this AudioRecord.
      *
      * Parameters:
@@ -338,7 +352,7 @@
      *
      * No lock needed because session ID doesn't change after first set().
      */
-            int    getSessionId() const { return mSessionId; }
+            audio_session_t getSessionId() const { return mSessionId; }
 
     /* Public API for TRANSFER_OBTAIN mode.
      * Obtains a buffer of up to "audioBuffer->frameCount" full frames.
@@ -526,7 +540,7 @@
 
             // caller must hold lock on mLock for all _l methods
 
-            status_t openRecord_l(size_t epoch, const String16& opPackageName);
+            status_t openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
 
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreRecord_l(const char *from);
@@ -556,9 +570,9 @@
     bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
     uint32_t                mObservedSequence;      // last observed value of mSequence
 
-    uint32_t                mMarkerPosition;        // in wrapping (overflow) frame units
+    Modulo<uint32_t>        mMarkerPosition;        // in wrapping (overflow) frame units
     bool                    mMarkerReached;
-    uint32_t                mNewPosition;           // in frames
+    Modulo<uint32_t>        mNewPosition;           // in frames
     uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
 
     status_t                mStatus;
@@ -570,6 +584,11 @@
     size_t                  mReqFrameCount;         // frame count to request the first or next time
                                                     // a new IAudioRecord is needed, non-decreasing
 
+    int64_t                 mFramesRead;            // total frames read. reset to zero after
+                                                    // the start() following stop(). It is not
+                                                    // changed after restoring the track.
+    int64_t                 mFramesReadServerOffset; // An offset to server frames read due to
+                                                    // restoring AudioRecord, or stop/start.
     // constant after constructor or set()
     uint32_t                mSampleRate;
     audio_format_t          mFormat;
@@ -577,8 +596,14 @@
     size_t                  mFrameSize;         // app-level frame size == AudioFlinger frame size
     uint32_t                mLatency;           // in ms
     audio_channel_mask_t    mChannelMask;
-    audio_input_flags_t     mFlags;
-    int                     mSessionId;
+
+    audio_input_flags_t     mFlags;                 // same as mOrigFlags, except for bits that may
+                                                    // be denied by client or server, such as
+                                                    // AUDIO_INPUT_FLAG_FAST.  mLock must be
+                                                    // held to read or write those bits reliably.
+    audio_input_flags_t     mOrigFlags;             // as specified in constructor or set(), const
+
+    audio_session_t         mSessionId;
     transfer_type           mTransfer;
 
     // Next 5 fields may be changed if IAudioRecord is re-created, but always != 0
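
A minimal sketch of the new record timestamp (not part of this change):
fetch an ExtendedTimestamp and reduce it to a (frames, monotonic ns) pair
with getBestTimestamp(); assumes a started AudioRecord, with logging as
illustration.

    void logRecordPosition(const sp<AudioRecord>& record) {
        ExtendedTimestamp ets;
        if (record->getTimestamp(&ets) != NO_ERROR) {
            return;
        }
        int64_t position, timeNs;
        if (ets.getBestTimestamp(&position, &timeNs,
                ExtendedTimestamp::TIMEBASE_MONOTONIC) == OK) {
            ALOGI("record position %lld frames at %lld ns",
                    (long long)position, (long long)timeNs);
        }
    }
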
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 26a0bb2..2e6646a 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -31,6 +31,9 @@
 
 typedef void (*audio_error_callback)(status_t err);
 typedef void (*dynamic_policy_callback)(int event, String8 regId, int val);
+typedef void (*record_config_callback)(int event, audio_session_t session, int source,
+                const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+                audio_patch_handle_t patchHandle);
 
 class IAudioFlinger;
 class IAudioPolicyService;
@@ -40,6 +43,8 @@
 {
 public:
 
+    // FIXME Declare in binder opcode order, similarly to IAudioFlinger.h and IAudioFlinger.cpp
+
     /* These are static methods to control the system-wide AudioFlinger
      * only privileged processes can have access to them
      */
@@ -92,6 +97,7 @@
 
     static void setErrorCallback(audio_error_callback cb);
     static void setDynPolicyCallback(dynamic_policy_callback cb);
+    static void setRecordConfigCallback(record_config_callback);
 
     // helper function to obtain AudioFlinger service handle
     static const sp<IAudioFlinger> get_audio_flinger();
@@ -110,11 +116,12 @@
     // FIXME This API assumes a route, and so should be deprecated.
     static status_t getOutputLatency(uint32_t* latency,
             audio_stream_type_t stream);
-    static status_t getSamplingRate(audio_io_handle_t output,
+    // returns the audio HAL sample rate
+    static status_t getSamplingRate(audio_io_handle_t ioHandle,
                                           uint32_t* samplingRate);
-    // returns the number of frames per audio HAL write buffer. Corresponds to
-    // audio_stream->get_buffer_size()/audio_stream_out_frame_size()
-    static status_t getFrameCount(audio_io_handle_t output,
+    // For output threads with a fast mixer, returns the number of frames per normal mixer buffer.
+    // For output threads without a fast mixer, or for input, this is same as getFrameCountHAL().
+    static status_t getFrameCount(audio_io_handle_t ioHandle,
                                   size_t* frameCount);
     // returns the audio output latency in ms. Corresponds to
     // audio_stream_out->get_latency()
@@ -146,12 +153,12 @@
     // Allocate a new unique ID for use as an audio session ID or I/O handle.
     // If unable to contact AudioFlinger, returns AUDIO_UNIQUE_ID_ALLOCATE instead.
     // FIXME If AudioFlinger were to ever exhaust the unique ID namespace,
-    //       this method could fail by returning either AUDIO_UNIQUE_ID_ALLOCATE
+    //       this method could fail by returning either a reserved ID like AUDIO_UNIQUE_ID_ALLOCATE
     //       or an unspecified existing unique ID.
-    static audio_unique_id_t newAudioUniqueId();
+    static audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
 
-    static void acquireAudioSessionId(int audioSession, pid_t pid);
-    static void releaseAudioSessionId(int audioSession, pid_t pid);
+    static void acquireAudioSessionId(audio_session_t audioSession, pid_t pid);
+    static void releaseAudioSessionId(audio_session_t audioSession, pid_t pid);
 
     // Get the HW synchronization source used for an audio session.
     // Return a valid source or AUDIO_HW_SYNC_INVALID if an error occurs
@@ -161,6 +168,12 @@
     // Indicate JAVA services are ready (scheduling, power management ...)
     static status_t systemReady();
 
+    // Returns the number of frames per audio HAL buffer.
+    // Corresponds to audio_stream->get_buffer_size()/audio_stream_in_frame_size() for input.
+    // See also getFrameCount().
+    static status_t getFrameCountHAL(audio_io_handle_t ioHandle,
+                                     size_t* frameCount);
+
     // Events used to synchronize actions between audio sessions.
     // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until
     // playback is complete on another audio session.
@@ -225,6 +238,7 @@
     static status_t getInputForAttr(const audio_attributes_t *attr,
                                     audio_io_handle_t *input,
                                     audio_session_t session,
+                                    pid_t pid,
                                     uid_t uid,
                                     uint32_t samplingRate,
                                     audio_format_t format,
@@ -255,7 +269,7 @@
     static status_t registerEffect(const effect_descriptor_t *desc,
                                     audio_io_handle_t io,
                                     uint32_t strategy,
-                                    int session,
+                                    audio_session_t session,
                                     int id);
     static status_t unregisterEffect(int id);
     static status_t setEffectEnabled(int id, bool enabled);
@@ -319,6 +333,8 @@
                                       audio_io_handle_t *handle);
     static status_t stopAudioSource(audio_io_handle_t handle);
 
+    static status_t setMasterMono(bool mono);
+    static status_t getMasterMono(bool *mono);
 
     // ----------------------------------------------------------------------------
 
@@ -419,6 +435,9 @@
         virtual void onAudioPortListUpdate();
         virtual void onAudioPatchListUpdate();
         virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+        virtual void onRecordingConfigurationUpdate(int event, audio_session_t session,
+                        audio_source_t source, const audio_config_base_t *clientConfig,
+                        const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
 
     private:
         Mutex                               mLock;
@@ -438,6 +457,7 @@
     static sp<IAudioFlinger> gAudioFlinger;
     static audio_error_callback gAudioErrorCallback;
     static dynamic_policy_callback gDynPolicyCallback;
+    static record_config_callback gRecordConfigCallback;
 
     static size_t gInBuffSize;
     // previous parameters for recording buffer size queries
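
A minimal sketch of the new recording-configuration hook (not part of this
change): the handler must match the record_config_callback typedef above;
RECORD_CONFIG_EVENT_START/STOP are the AudioPolicy.h event codes, and the
logging is illustrative.

    static void onRecordConfigChanged(int event, audio_session_t session,
            int source, const audio_config_base_t* clientConfig,
            const audio_config_base_t* deviceConfig,
            audio_patch_handle_t patchHandle) {
        if (event == RECORD_CONFIG_EVENT_START) {
            ALOGI("recording started: session %d source %d", session, source);
        } else if (event == RECORD_CONFIG_EVENT_STOP) {
            ALOGI("recording stopped: session %d", session);
        }
        (void)clientConfig; (void)deviceConfig; (void)patchHandle;
    }

    // Typically installed once at init:
    //     AudioSystem::setRecordConfigCallback(onRecordConfigChanged);
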
diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h
index 99e9c3e..498de8e 100644
--- a/include/media/AudioTimestamp.h
+++ b/include/media/AudioTimestamp.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_AUDIO_TIMESTAMP_H
 #define ANDROID_AUDIO_TIMESTAMP_H
 
+#include <string>
+#include <sstream>
 #include <time.h>
 
 namespace android {
@@ -32,6 +34,123 @@
     struct timespec mTime;     // corresponding CLOCK_MONOTONIC when frame is expected to present
 };
 
+struct alignas(8) /* bug 29096183, bug 29108507 */ ExtendedTimestamp {
+    enum Location {
+        LOCATION_INVALID = -1,
+        // Locations in the audio playback / record pipeline.
+        LOCATION_CLIENT,   // timestamp of last read frame from client-server track buffer.
+        LOCATION_SERVER,   // timestamp of newest frame from client-server track buffer.
+        LOCATION_KERNEL,   // timestamp of newest frame in the kernel (alsa) buffer.
+
+        // Historical data: info when the kernel timestamp was OK (prior to the newest frame).
+        // This may be useful when the newest frame kernel timestamp is unavailable.
+        // Available for playback timestamps.
+        LOCATION_SERVER_LASTKERNELOK, // server timestamp from the last time the kernel timestamp was OK.
+        LOCATION_KERNEL_LASTKERNELOK, // kernel timestamp from the last time the kernel timestamp was OK.
+        LOCATION_MAX       // for sizing arrays only
+    };
+
+    // This needs to be kept in sync with android.media.AudioTimestamp
+    enum Timebase {
+        TIMEBASE_MONOTONIC,  // Clock monotonic offset (generally 0)
+        TIMEBASE_BOOTTIME,
+        TIMEBASE_MAX,
+    };
+
+    ExtendedTimestamp() {
+        clear();
+    }
+
+    // mPosition is expressed in frame units.
+    // It is generally nonnegative, though we keep this signed for
+    // to potentially express algorithmic latency at the start of the stream
+    // and to prevent unintentional unsigned integer underflow.
+    int64_t mPosition[LOCATION_MAX];
+
+    // mTimeNs is in nanoseconds for the default timebase, monotonic.
+    // If this value is -1, then both time and position are invalid.
+    // If this value is 0, then the time is not valid but the position is valid.
+    int64_t mTimeNs[LOCATION_MAX];
+
+    // mTimebaseOffset is the offset in ns from monotonic when the
+    // timestamp was taken.  This may vary due to suspend time
+    // or NTP adjustment.
+    int64_t mTimebaseOffset[TIMEBASE_MAX];
+
+    // Playback only:
+    // mFlushed is number of flushed frames before entering the server mix;
+    // hence not included in mPosition. This is used for adjusting server position
+    // information for frames "dropped".
+    // FIXME: This variable should be eliminated, with the offset added on the server side
+    // before sending to client, but differences in legacy position offset handling
+    // and new extended timestamps require this to be maintained as a separate quantity.
+    int64_t mFlushed;
+
+    // Call to reset the timestamp to the original (invalid) state
+    void clear() {
+        memset(mPosition, 0, sizeof(mPosition)); // actually not necessary if time is -1
+        for (int i = 0; i < LOCATION_MAX; ++i) {
+            mTimeNs[i] = -1;
+        }
+        memset(mTimebaseOffset, 0, sizeof(mTimebaseOffset));
+        mFlushed = 0;
+    }
+
+    // Returns the best timestamp as judged from the closest-to-hw stage in the
+    // pipeline with a valid timestamp.  If the optional location parameter is non-null,
+    // it will be filled with the location where the time was obtained.
+    status_t getBestTimestamp(
+            int64_t *position, int64_t *time, int timebase, Location *location = nullptr) const {
+        if (position == nullptr || time == nullptr
+                || timebase < 0 || timebase >= TIMEBASE_MAX) {
+            return BAD_VALUE;
+        }
+        // look for the closest-to-hw stage in the pipeline with a valid timestamp.
+        // We omit LOCATION_CLIENT as we prefer at least LOCATION_SERVER based accuracy
+        // when getting the best timestamp.
+        for (int i = LOCATION_KERNEL; i >= LOCATION_SERVER; --i) {
+            if (mTimeNs[i] > 0) {
+                *position = mPosition[i];
+                *time = mTimeNs[i] + mTimebaseOffset[timebase];
+                if (location != nullptr) {
+                    *location = (Location)i;
+                }
+                return OK;
+            }
+        }
+        return INVALID_OPERATION;
+    }
+
+    status_t getBestTimestamp(AudioTimestamp *timestamp, Location *location = nullptr) const {
+        if (timestamp == nullptr) {
+            return BAD_VALUE;
+        }
+        int64_t position, time;
+        if (getBestTimestamp(&position, &time, TIMEBASE_MONOTONIC, location) == OK) {
+            timestamp->mPosition = position;
+            timestamp->mTime.tv_sec = time / 1000000000;
+            timestamp->mTime.tv_nsec = time - timestamp->mTime.tv_sec * 1000000000LL;
+            return OK;
+        }
+        return INVALID_OPERATION;
+    }
+
+    // convert fields to a printable string
+    std::string toString() {
+        std::stringstream ss;
+
+        ss << "BOOTTIME offset " << mTimebaseOffset[TIMEBASE_BOOTTIME] << "\n";
+        for (int i = 0; i < LOCATION_MAX; ++i) {
+            ss << "ExtendedTimestamp[" << i << "]  position: "
+                    << mPosition[i] << "  time: "  << mTimeNs[i] << "\n";
+        }
+        return ss.str();
+    }
+    // TODO:
+    // Consider adding buffer status:
+    // size, available, algorithmic latency
+};
+
 }   // namespace
 
 #endif  // ANDROID_AUDIO_TIMESTAMP_H
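
A minimal sketch of the legacy bridge above (not part of this change):
collapse an ExtendedTimestamp into the old AudioTimestamp while noting which
pipeline stage supplied the time; the logging is illustrative.

    void logLegacyTimestamp(const ExtendedTimestamp& ets) {
        AudioTimestamp legacy;
        ExtendedTimestamp::Location location;
        if (ets.getBestTimestamp(&legacy, &location) == OK) {
            const char* stage = (location == ExtendedTimestamp::LOCATION_KERNEL)
                    ? "kernel" : "server";
            ALOGI("position %u at %ld.%09ld s (%s)", legacy.mPosition,
                    (long)legacy.mTime.tv_sec, (long)legacy.mTime.tv_nsec, stage);
        }
    }
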
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index e02f1b7..88c4e61 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -22,6 +22,7 @@
 #include <media/AudioTimestamp.h>
 #include <media/IAudioTrack.h>
 #include <media/AudioResamplerPublic.h>
+#include <media/Modulo.h>
 #include <utils/threads.h>
 
 namespace android {
@@ -166,7 +167,10 @@
      *
      * streamType:         Select the type of audio stream this track is attached to
      *                     (e.g. AUDIO_STREAM_MUSIC).
-     * sampleRate:         Data source sampling rate in Hz.
+     * sampleRate:         Data source sampling rate in Hz.  Zero means to use the sink sample rate.
+     *                     A non-zero value must be specified if AUDIO_OUTPUT_FLAG_DIRECT is set.
+     *                     Zero will not work with the current policy implementation for direct
+     *                     output selection, where an exact match on sampling rate is needed.
      * format:             Audio format. For mixed tracks, any PCM format supported by server is OK.
      *                     For direct and offloaded tracks, the possible format(s) depends on the
      *                     output sink.
@@ -182,8 +186,21 @@
      *                     and inform of marker, position updates, etc.
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
-     *                     frames have been consumed from track input buffer.
-     *                     This is expressed in units of frames at the initial source sample rate.
+     *                     frames have been consumed from track input buffer by server.
+     *                     Zero means to use a default value, which is typically:
+     *                      - fast tracks: HAL buffer size, even if track frameCount is larger
+     *                      - normal tracks: 1/2 of track frameCount
+     *                     A positive value means that many frames at initial source sample rate.
+     *                     A negative value for this parameter specifies the negative of the
+     *                     requested number of notifications (sub-buffers) in the entire buffer.
+     *                     For fast tracks, the FastMixer will process one sub-buffer at a time.
+     *                     The size of each sub-buffer is determined by the HAL.
+     *                     To get "double buffering", for example, one should pass -2.
+     *                     The minimum number of sub-buffers is 1 (expressed as -1),
+     *                     and the maximum number of sub-buffers is 8 (expressed as -8).
+     *                     Negative is only permitted for fast tracks, and if frameCount is zero.
+     *                     TODO It is ugly to overload a parameter in this way depending on
+     *                     whether it is positive, negative, or zero.  Consider splitting apart.
      * sessionId:          Specific session ID, or zero to use default.
      * transferType:       How data is transferred to AudioTrack.
      * offloadInfo:        If not NULL, provides offload parameters for
@@ -197,6 +214,10 @@
                            binder to AudioFlinger.
                            It will return an error instead.  The application will recreate
                            the track based on offloading or different channel configuration, etc.
+     * maxRequiredSpeed:   For PCM tracks, this creates an appropriate buffer size that will allow
+     *                     maxRequiredSpeed playback. Values less than 1.0f or greater than
+     *                     AUDIO_TIMESTRETCH_SPEED_MAX will be clamped.  For non-PCM tracks
+     *                     and direct or offloaded tracks, this parameter is ignored.
      * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
 
@@ -208,14 +229,15 @@
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                     callback_t cbf       = NULL,
                                     void* user           = NULL,
-                                    uint32_t notificationFrames = 0,
-                                    int sessionId        = AUDIO_SESSION_ALLOCATE,
+                                    int32_t notificationFrames = 0,
+                                    audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     const audio_offload_info_t *offloadInfo = NULL,
                                     int uid = -1,
                                     pid_t pid = -1,
                                     const audio_attributes_t* pAttributes = NULL,
-                                    bool doNotReconnect = false);
+                                    bool doNotReconnect = false,
+                                    float maxRequiredSpeed = 1.0f);
 
     /* Creates an audio track and registers it with AudioFlinger.
      * With this constructor, the track is configured for static buffer mode.
@@ -237,14 +259,15 @@
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                     callback_t cbf      = NULL,
                                     void* user          = NULL,
-                                    uint32_t notificationFrames = 0,
-                                    int sessionId       = AUDIO_SESSION_ALLOCATE,
+                                    int32_t notificationFrames = 0,
+                                    audio_session_t sessionId   = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     const audio_offload_info_t *offloadInfo = NULL,
                                     int uid = -1,
                                     pid_t pid = -1,
                                     const audio_attributes_t* pAttributes = NULL,
-                                    bool doNotReconnect = false);
+                                    bool doNotReconnect = false,
+                                    float maxRequiredSpeed = 1.0f);
 
     /* Terminates the AudioTrack and unregisters it from AudioFlinger.
      * Also destroys all resources associated with the AudioTrack.
@@ -280,16 +303,17 @@
                             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                             callback_t cbf      = NULL,
                             void* user          = NULL,
-                            uint32_t notificationFrames = 0,
+                            int32_t notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
-                            int sessionId       = AUDIO_SESSION_ALLOCATE,
+                            audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
                             const audio_offload_info_t *offloadInfo = NULL,
                             int uid = -1,
                             pid_t pid = -1,
                             const audio_attributes_t* pAttributes = NULL,
-                            bool doNotReconnect = false);
+                            bool doNotReconnect = false,
+                            float maxRequiredSpeed = 1.0f);
 
     /* Result of constructing the AudioTrack. This must be checked for successful initialization
      * before using any AudioTrack API (except for set()), because using
@@ -304,6 +328,11 @@
      */
             uint32_t    latency() const     { return mLatency; }
 
+    /* Returns the number of application-level buffer underruns
+     * since the AudioTrack was created.
+     */
+            uint32_t    getUnderrunCount() const;
+
     /* getters, see constructors and set() */
 
             audio_stream_type_t streamType() const;
@@ -319,6 +348,31 @@
             uint32_t    channelCount() const { return mChannelCount; }
             size_t      frameCount() const  { return mFrameCount; }
 
+    // TODO consider notificationFrames() if needed
+
+    /* Return the effective size of the audio buffer that an application writes to,
+     * or a negative error if the track is uninitialized.
+     */
+            ssize_t     getBufferSizeInFrames();
+
+    /* Returns the buffer duration in microseconds at current playback rate.
+     */
+            status_t    getBufferDurationInUs(int64_t *duration);
+
+    /* Set the effective size of audio buffer that an application writes to.
+     * This is used to determine the amount of available room in the buffer,
+     * which determines when a write will block.
+     * This allows an application to raise and lower the audio latency.
+     * The requested size may be adjusted so that it is
+     * greater than or equal to the absolute minimum and
+     * less than or equal to the getBufferCapacityInFrames().
+     * It may also be adjusted slightly for internal reasons.
+     *
+     * Return the final size or a negative error if the track is uninitialized
+     * or does not support variable sizes.
+     */
+            ssize_t     setBufferSizeInFrames(size_t size);
+
     /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */
             sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
 
@@ -370,11 +424,14 @@
             status_t    setAuxEffectSendLevel(float level);
             void        getAuxEffectSendLevel(float* level) const;
 
-    /* Set source sample rate for this track in Hz, mostly used for games' sound effects
+    /* Set source sample rate for this track in Hz, mostly used for games' sound effects.
+     * Zero is not permitted.
      */
             status_t    setSampleRate(uint32_t sampleRate);
 
-    /* Return current source sample rate in Hz */
+    /* Return current source sample rate in Hz.
+     * If specified as zero in constructor or set(), this will be the sink sample rate.
+     */
             uint32_t    getSampleRate() const;
 
     /* Return the original source sample rate in Hz. This corresponds to the sample rate
@@ -552,7 +609,7 @@
      * Returned value:
      *  AudioTrack session ID.
      */
-            int    getSessionId() const { return mSessionId; }
+            audio_session_t getSessionId() const { return mSessionId; }
 
     /* Attach track auxiliary output to specified effect. Use effectId = 0
      * to detach track from effect.
@@ -696,12 +753,54 @@
      *                     because the audio device changed or AudioFlinger died.
      *                     This typically occurs for direct or offload tracks
      *                     or if mDoNotReconnect is true.
-     *         INVALID_OPERATION  if called on a FastTrack, wrong state, or some other error.
+     *         INVALID_OPERATION  if the track is in a wrong state, or some other error.
      *
      * The timestamp parameter is undefined on return, if status is not NO_ERROR.
      */
             status_t    getTimestamp(AudioTimestamp& timestamp);
 
+    /* Return the extended timestamp, with additional timebase info and improved drain behavior.
+     *
+     * This is similar to the AudioTrack.java API:
+     * getTimestamp(@NonNull AudioTimestamp timestamp, @AudioTimestamp.Timebase int timebase)
+     *
+     * Some differences between this method and the getTimestamp(AudioTimestamp& timestamp) method:
+     *
+     *   1. stop() by itself does not reset the frame position.
+     *      A following start() resets the frame position to 0.
+     *   2. flush() by itself does not reset the frame position.
+     *      The frame position advances by the number of frames flushed,
+     *      when the first frame after flush reaches the audio sink.
+     *   3. BOOTTIME clock offsets are provided to help synchronize with
+     *      non-audio streams, e.g. sensor data.
+     *   4. Position is returned with 64 bits of resolution.
+     *
+     * Parameters:
+     *  timestamp: A pointer to the caller allocated ExtendedTimestamp.
+     *
+     * Returns NO_ERROR    on success; timestamp is filled with valid data.
+     *         BAD_VALUE   if timestamp is NULL.
+     *         WOULD_BLOCK if called immediately after start() when the number
+     *                     of frames consumed is less than the
+     *                     overall hardware latency to physical output. In WOULD_BLOCK cases,
+     *                     one might poll again, or use getPosition(), or use 0 position and
+     *                     current time for the timestamp.
+     *                     If WOULD_BLOCK is returned, the timestamp is still
+     *                     modified with the LOCATION_CLIENT portion filled.
+     *         DEAD_OBJECT if AudioFlinger dies or the output device changes and
+     *                     the track cannot be automatically restored.
+     *                     The application needs to recreate the AudioTrack
+     *                     because the audio device changed or AudioFlinger died.
+     *                     This typically occurs for direct or offloaded tracks
+     *                     or if mDoNotReconnect is true.
+     *         INVALID_OPERATION  if called on an offloaded or direct track.
+     *                     Use getTimestamp(AudioTimestamp& timestamp) instead.
+     */
+            status_t getTimestamp(ExtendedTimestamp *timestamp);
+private:
+            status_t getTimestamp_l(ExtendedTimestamp *timestamp);
+public:
+
     /* Add an AudioDeviceCallback. The caller will be notified when the audio device to which this
      * AudioTrack is routed is updated.
      * Replaces any previously installed callback.
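
A minimal polling sketch for the new getTimestamp(ExtendedTimestamp *)
overload, assuming `track` is a started AudioTrack:

    ExtendedTimestamp ets;
    status_t res = track->getTimestamp(&ets);
    if (res == NO_ERROR) {
        // ets carries 64-bit positions plus timebase info per location
    } else if (res == WOULD_BLOCK) {
        // too soon after start(): poll again later, or fall back to
        // getPosition(); the LOCATION_CLIENT portion is still filled in
    }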
@@ -724,6 +823,23 @@
             status_t removeAudioDeviceCallback(
                     const sp<AudioSystem::AudioDeviceCallback>& callback);
 
+    /* Obtain the pending duration in milliseconds for playback of pure PCM
+     * (mixable without embedded timing) data remaining in AudioTrack.
+     *
+     * This is used to estimate the drain time for the client-server buffer,
+     * so ExtendedTimestamp::LOCATION_SERVER is the default location.
+     * One may optionally request to find the duration to play through the HAL
+     * by specifying a location ExtendedTimestamp::LOCATION_KERNEL; however,
+     * INVALID_OPERATION may be returned if the kernel location is unavailable.
+     *
+     * Returns NO_ERROR  if successful.
+     *         INVALID_OPERATION if ExtendedTimestamp::LOCATION_KERNEL cannot be obtained
+     *                   or the AudioTrack does not contain pure PCM data.
+     *         BAD_VALUE if msec is nullptr or location is invalid.
+     */
+            status_t pendingDuration(int32_t *msec,
+                    ExtendedTimestamp::Location location = ExtendedTimestamp::LOCATION_SERVER);
+
 protected:
     /* copying audio tracks is not allowed */
                         AudioTrack(const AudioTrack& other);
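
And a short sketch of pendingDuration(), for the same hypothetical `track`:

    int32_t msec = 0;
    status_t res = track->pendingDuration(&msec);  // LOCATION_SERVER default
    if (res == NO_ERROR) {
        // roughly `msec` ms of PCM remain queued between client and server
    } else if (res == INVALID_OPERATION) {
        // not pure PCM data, or LOCATION_KERNEL was requested but unavailable
    }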
@@ -783,6 +899,8 @@
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreTrack_l(const char *from);
 
+            uint32_t    getUnderrunCount_l() const;
+
             bool     isOffloaded() const;
             bool     isDirect() const;
             bool     isOffloadedOrDirect() const;
@@ -797,12 +915,19 @@
             bool     isDirect_l() const
                 { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
 
+            // pure PCM data is mixable (this excludes HW_AV_SYNC data, which has embedded timing)
+            bool     isPurePcmData_l() const
+                { return audio_is_linear_pcm(mFormat)
+                        && (mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) == 0; }
+
             // increment mPosition by the delta of mServer, and return new value of mPosition
-            uint32_t updateAndGetPosition_l();
+            Modulo<uint32_t> updateAndGetPosition_l();
 
             // check sample rate and speed is compatible with AudioTrack
             bool     isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const;
 
+            void     restartIfDisabled();
+
     // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -810,14 +935,19 @@
     audio_io_handle_t       mOutput;                // returned by AudioSystem::getOutput()
 
     sp<AudioTrackThread>    mAudioTrackThread;
+    bool                    mThreadCanCallJava;
 
     float                   mVolume[2];
     float                   mSendLevel;
     mutable uint32_t        mSampleRate;            // mutable because getSampleRate() can update it
     uint32_t                mOriginalSampleRate;
     AudioPlaybackRate       mPlaybackRate;
-    size_t                  mFrameCount;            // corresponds to current IAudioTrack, value is
-                                                    // reported back by AudioFlinger to the client
+    float                   mMaxRequiredSpeed;      // use PCM buffer size to allow this speed
+
+    // Corresponds to the current IAudioTrack; the value is reported back by
+    // AudioFlinger to the client. This allocated buffer size is maintained by the proxy.
+    size_t                  mFrameCount;            // maximum size of buffer
+
     size_t                  mReqFrameCount;         // frame count to request the first or next time
                                                     // a new IAudioTrack is needed, non-decreasing
 
@@ -862,9 +992,16 @@
     void*                   mUserData;
 
     // for notification APIs
+
+    // next 2 fields are const after constructor or set()
     uint32_t                mNotificationFramesReq; // requested number of frames between each
                                                     // notification callback,
                                                     // at initial source sample rate
+    uint32_t                mNotificationsPerBufferReq;
+                                                    // requested number of notifications per buffer,
+                                                    // currently only used for fast tracks with
+                                                    // default track buffer size
+
     uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                     // notification callback,
                                                     // at initial source sample rate
@@ -885,19 +1022,19 @@
     bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
     uint32_t                mObservedSequence;      // last observed value of mSequence
 
-    uint32_t                mMarkerPosition;        // in wrapping (overflow) frame units
+    Modulo<uint32_t>        mMarkerPosition;        // in wrapping (overflow) frame units
     bool                    mMarkerReached;
-    uint32_t                mNewPosition;           // in frames
+    Modulo<uint32_t>        mNewPosition;           // in frames
     uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
 
-    uint32_t                mServer;                // in frames, last known mProxy->getPosition()
+    Modulo<uint32_t>        mServer;                // in frames, last known mProxy->getPosition()
                                                     // which is count of frames consumed by server,
                                                     // reset by new IAudioTrack,
                                                     // whether it is reset by stop() is TBD
-    uint32_t                mPosition;              // in frames, like mServer except continues
+    Modulo<uint32_t>        mPosition;              // in frames, like mServer except continues
                                                     // monotonically after new IAudioTrack,
                                                     // and could be easily widened to uint64_t
-    uint32_t                mReleased;              // in frames, count of frames released to server
+    Modulo<uint32_t>        mReleased;              // count of frames released to server
                                                     // but not necessarily consumed by server,
                                                     // reset by stop() but continues monotonically
                                                     // after new IAudioTrack to restore mPosition,
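
The position fields above move from uint32_t to Modulo<uint32_t> so that
deltas stay correct across the 2^32 wrap point. A sketch of the intent,
using the constructor and operators described in the Modulo.h comments
further below:

    Modulo<uint32_t> server(0xFFFFFFF0u);          // just before the wrap
    Modulo<uint32_t> position = server + 0x20;     // wraps to a small value
    uint32_t delta = (position - server).value();  // still 0x20, as intended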
@@ -909,19 +1046,30 @@
     bool                    mTimestampStartupGlitchReported; // reduce log spam
     bool                    mRetrogradeMotionReported; // reduce log spam
     AudioTimestamp          mPreviousTimestamp;     // used to detect retrograde motion
+    ExtendedTimestamp::Location mPreviousLocation;  // location used for previous timestamp
 
-    audio_output_flags_t    mFlags;
-        // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
-        // mLock must be held to read or write those bits reliably.
+    uint32_t                mUnderrunCountOffset;   // updated when restoring tracks
+
+    int64_t                 mFramesWritten;         // total frames written; reset to zero after
+                                                    // the start() following stop(). It is not
+                                                    // changed after restoring the track or
+                                                    // after flush.
+    int64_t                 mFramesWrittenServerOffset; // An offset to server frames due to
+                                                    // restoring AudioTrack, or stop/start.
+
+    audio_output_flags_t    mFlags;                 // same as mOrigFlags, except for bits that may
+                                                    // be denied by client or server, such as
+                                                    // AUDIO_OUTPUT_FLAG_FAST.  mLock must be
+                                                    // held to read or write those bits reliably.
+    audio_output_flags_t    mOrigFlags;             // as specified in constructor or set(), const
 
     bool                    mDoNotReconnect;
 
-    int                     mSessionId;
+    audio_session_t         mSessionId;
     int                     mAuxEffectId;
 
     mutable Mutex           mLock;
 
-    bool                    mIsTimed;
     int                     mPreviousPriority;          // before start()
     SchedPolicy             mPreviousSchedulingGroup;
     bool                    mAwaitBoost;    // thread should wait for priority boost before running
@@ -959,29 +1107,6 @@
     sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
 };
 
-class TimedAudioTrack : public AudioTrack
-{
-public:
-    TimedAudioTrack();
-
-    /* allocate a shared memory buffer that can be passed to queueTimedBuffer */
-    status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer);
-
-    /* queue a buffer obtained via allocateTimedBuffer for playback at the
-       given timestamp.  PTS units are microseconds on the media time timeline.
-       The media time transform (set with setMediaTimeTransform) set by the
-       audio producer will handle converting from media time to local time
-       (perhaps going through the common time timeline in the case of
-       synchronized multiroom audio case) */
-    status_t queueTimedBuffer(const sp<IMemory>& buffer, int64_t pts);
-
-    /* define a transform between media time and either common time or
-       local time */
-    enum TargetTimeline {LOCAL_TIME, COMMON_TIME};
-    status_t setMediaTimeTransform(const LinearTransform& xform,
-                                   TargetTimeline target);
-};
-
 }; // namespace android
 
 #endif // ANDROID_AUDIOTRACK_H
diff --git a/media/libmediaplayerservice/Crypto.h b/include/media/Crypto.h
similarity index 95%
rename from media/libmediaplayerservice/Crypto.h
rename to include/media/Crypto.h
index 99ea95d..7d181d3 100644
--- a/media/libmediaplayerservice/Crypto.h
+++ b/include/media/Crypto.h
@@ -50,10 +50,11 @@
     virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
 
     virtual ssize_t decrypt(
-            bool secure,
+            DestinationType dstType,
             const uint8_t key[16],
             const uint8_t iv[16],
             CryptoPlugin::Mode mode,
+            const CryptoPlugin::Pattern &pattern,
             const sp<IMemory> &sharedBuffer, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
             void *dstPtr,
diff --git a/media/libmediaplayerservice/Drm.h b/include/media/Drm.h
similarity index 98%
rename from media/libmediaplayerservice/Drm.h
rename to include/media/Drm.h
index 056723c..d40019b 100644
--- a/media/libmediaplayerservice/Drm.h
+++ b/include/media/Drm.h
@@ -77,8 +77,6 @@
                                               Vector<uint8_t> &certificate,
                                               Vector<uint8_t> &wrappedKey);
 
-    virtual status_t unprovisionDevice();
-
     virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops);
     virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
 
diff --git a/media/libmediaplayerservice/DrmSessionClientInterface.h b/include/media/DrmSessionClientInterface.h
similarity index 100%
rename from media/libmediaplayerservice/DrmSessionClientInterface.h
rename to include/media/DrmSessionClientInterface.h
diff --git a/media/libmediaplayerservice/DrmSessionManager.h b/include/media/DrmSessionManager.h
similarity index 100%
rename from media/libmediaplayerservice/DrmSessionManager.h
rename to include/media/DrmSessionManager.h
diff --git a/include/media/ExtendedAudioBufferProvider.h b/include/media/ExtendedAudioBufferProvider.h
index 2539ed3..168ceed 100644
--- a/include/media/ExtendedAudioBufferProvider.h
+++ b/include/media/ExtendedAudioBufferProvider.h
@@ -27,11 +27,11 @@
     virtual size_t  framesReady() const = 0;  // see description at AudioFlinger.h
 
     // Return the total number of frames that have been obtained and released
-    virtual size_t  framesReleased() const { return 0; }
+    virtual int64_t  framesReleased() const { return 0; }
 
     // Invoked by buffer consumer when a new timestamp is available.
     // Default implementation ignores the timestamp.
-    virtual void    onTimestamp(const AudioTimestamp& timestamp) { }
+    virtual void    onTimestamp(const ExtendedTimestamp& timestamp) { }
 };
 
 }   // namespace android
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 5051aff..984bc02 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -47,7 +47,8 @@
     // or-able bits shared by createTrack and openRecord, but not all combinations make sense
     enum {
         TRACK_DEFAULT = 0,  // client requests a default AudioTrack
-        TRACK_TIMED   = 1,  // client requests a TimedAudioTrack
+        // FIXME: obsolete
+        // TRACK_TIMED= 1,  // client requests a TimedAudioTrack
         TRACK_FAST    = 2,  // client requests a fast AudioTrack or AudioRecord
         TRACK_OFFLOAD = 4,  // client requests offload to hw codec
         TRACK_DIRECT = 8,   // client requests a direct output
@@ -72,8 +73,9 @@
                                 // reference and will release it when the track is destroyed.
                                 // However on failure, the client is responsible for release.
                                 audio_io_handle_t output,
+                                pid_t pid,
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
-                                int *sessionId,
+                                audio_session_t *sessionId,
                                 int clientUid,
                                 status_t *status) = 0;
 
@@ -88,22 +90,26 @@
                                 const String16& callingPackage,
                                 size_t *pFrameCount,
                                 track_flags_t *flags,
+                                pid_t pid,
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
                                 int clientUid,
-                                int *sessionId,
+                                audio_session_t *sessionId,
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
                                 sp<IMemory>& buffers,   // return value 0 means it follows cblk
                                 status_t *status) = 0;
 
-    // FIXME Surprisingly, sampleRate/format/frameCount/latency don't work for input handles
+    // FIXME Surprisingly, format/latency don't work for input handles
 
     /* query the audio hardware state. This state never changes,
      * and therefore can be cached.
      */
-    virtual     uint32_t    sampleRate(audio_io_handle_t output) const = 0;
+    virtual     uint32_t    sampleRate(audio_io_handle_t ioHandle) const = 0;
+
+    // reserved; formerly channelCount()
+
     virtual     audio_format_t format(audio_io_handle_t output) const = 0;
-    virtual     size_t      frameCount(audio_io_handle_t output) const = 0;
+    virtual     size_t      frameCount(audio_io_handle_t ioHandle) const = 0;
 
     // return estimated latency in milliseconds
     virtual     uint32_t    latency(audio_io_handle_t output) const = 0;
@@ -181,10 +187,10 @@
 
     virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0;
 
-    virtual audio_unique_id_t newAudioUniqueId() = 0;
+    virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) = 0;
 
-    virtual void acquireAudioSessionId(int audioSession, pid_t pid) = 0;
-    virtual void releaseAudioSessionId(int audioSession, pid_t pid) = 0;
+    virtual void acquireAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
+    virtual void releaseAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
 
     virtual status_t queryNumberEffects(uint32_t *numEffects) const = 0;
 
@@ -199,13 +205,13 @@
                                     int32_t priority,
                                     // AudioFlinger doesn't take over handle reference from client
                                     audio_io_handle_t output,
-                                    int sessionId,
+                                    audio_session_t sessionId,
                                     const String16& callingPackage,
                                     status_t *status,
                                     int *id,
                                     int *enabled) = 0;
 
-    virtual status_t moveEffects(int session, audio_io_handle_t srcOutput,
+    virtual status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
                                     audio_io_handle_t dstOutput) = 0;
 
     virtual audio_module_handle_t loadHwModule(const char *name) = 0;
@@ -246,6 +252,9 @@
 
     /* Indicate JAVA services are ready (scheduling, power management ...) */
     virtual status_t systemReady() = 0;
+
+    // Returns the number of frames per audio HAL buffer.
+    virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const = 0;
 };
 
 
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 6b93f6f..0e9e3bc 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -59,16 +59,16 @@
                                         audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                         const audio_offload_info_t *offloadInfo = NULL) = 0;
     virtual status_t getOutputForAttr(const audio_attributes_t *attr,
-                                        audio_io_handle_t *output,
-                                        audio_session_t session,
-                                        audio_stream_type_t *stream,
-                                        uid_t uid,
-                                        uint32_t samplingRate = 0,
-                                        audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                        audio_channel_mask_t channelMask = 0,
-                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                        audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
-                                        const audio_offload_info_t *offloadInfo = NULL) = 0;
+                                      audio_io_handle_t *output,
+                                      audio_session_t session,
+                                      audio_stream_type_t *stream,
+                                      uid_t uid,
+                                      uint32_t samplingRate = 0,
+                                      audio_format_t format = AUDIO_FORMAT_DEFAULT,
+                                      audio_channel_mask_t channelMask = 0,
+                                      audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                      audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                                      const audio_offload_info_t *offloadInfo = NULL) = 0;
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  audio_session_t session) = 0;
@@ -81,6 +81,7 @@
     virtual status_t  getInputForAttr(const audio_attributes_t *attr,
                               audio_io_handle_t *input,
                               audio_session_t session,
+                              pid_t pid,
                               uid_t uid,
                               uint32_t samplingRate,
                               audio_format_t format,
@@ -108,7 +109,7 @@
     virtual status_t registerEffect(const effect_descriptor_t *desc,
                                     audio_io_handle_t io,
                                     uint32_t strategy,
-                                    int session,
+                                    audio_session_t session,
                                     int id) = 0;
     virtual status_t unregisterEffect(int id) = 0;
     virtual status_t setEffectEnabled(int id, bool enabled) = 0;
@@ -116,7 +117,7 @@
     virtual bool     isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0)
                              const = 0;
     virtual bool     isSourceActive(audio_source_t source) const = 0;
-    virtual status_t queryDefaultPreProcessing(int audioSession,
+    virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count) = 0;
    // Check if offload is possible for given format, stream type, sample rate,
@@ -165,6 +166,9 @@
                                       const audio_attributes_t *attributes,
                                       audio_io_handle_t *handle) = 0;
     virtual status_t stopAudioSource(audio_io_handle_t handle) = 0;
+
+    virtual status_t setMasterMono(bool mono) = 0;
+    virtual status_t getMasterMono(bool *mono) = 0;
 };
 
 
diff --git a/include/media/IAudioPolicyServiceClient.h b/include/media/IAudioPolicyServiceClient.h
index a7f2cc3..d94ad00 100644
--- a/include/media/IAudioPolicyServiceClient.h
+++ b/include/media/IAudioPolicyServiceClient.h
@@ -37,6 +37,12 @@
     virtual void onAudioPatchListUpdate() = 0;
     // Notifies a change in the mixing state of a specific mix in a dynamic audio policy
     virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
+    // Notifies a change of audio recording configuration
+    virtual void onRecordingConfigurationUpdate(int event, audio_session_t session,
+            audio_source_t source,
+            const audio_config_base_t *clientConfig,
+            const audio_config_base_t *deviceConfig,
+            audio_patch_handle_t patchHandle) = 0;
 };
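
A skeletal client-side override for the new recording-configuration
callback (the class name and body are illustrative):

    // Invoked by the policy service when a capture session starts, stops,
    // or changes its client or device configuration.
    void MyPolicyClient::onRecordingConfigurationUpdate(int event,
            audio_session_t session, audio_source_t source,
            const audio_config_base_t *clientConfig,
            const audio_config_base_t *deviceConfig,
            audio_patch_handle_t patchHandle) {
        // e.g. note which sessions are actively recording from which source
    }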
 
 
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index 2003985..7768176 100644
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -24,6 +24,7 @@
 #include <utils/Errors.h>
 #include <binder/IInterface.h>
 #include <binder/IMemory.h>
+#include <system/audio.h>
 
 namespace android {
 
@@ -37,7 +38,8 @@
     /* After it's created the track is not active. Call start() to
      * make it active.
      */
-    virtual status_t    start(int /*AudioSystem::sync_event_t*/ event, int triggerSession) = 0;
+    virtual status_t    start(int /*AudioSystem::sync_event_t*/ event,
+                              audio_session_t triggerSession) = 0;
 
     /* Stop a track. If set, the callback will cease being called and
      * obtainBuffer will return an error. Buffers that are already released
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 619ac78..a31cec6 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -24,7 +24,6 @@
 #include <utils/Errors.h>
 #include <binder/IInterface.h>
 #include <binder/IMemory.h>
-#include <utils/LinearTransform.h>
 #include <utils/String8.h>
 #include <media/AudioTimestamp.h>
 
@@ -67,24 +66,6 @@
      */
     virtual status_t    attachAuxEffect(int effectId) = 0;
 
-
-    /* Allocate a shared memory buffer suitable for holding timed audio
-       samples */
-    virtual status_t    allocateTimedBuffer(size_t size,
-                                            sp<IMemory>* buffer) = 0;
-
-    /* Queue a buffer obtained via allocateTimedBuffer for playback at the given
-       timestamp */
-    virtual status_t    queueTimedBuffer(const sp<IMemory>& buffer,
-                                         int64_t pts) = 0;
-
-    /* Define the linear transform that will be applied to the timestamps
-       given to queueTimedBuffer (which are expressed in media time).
-       Target specifies whether this transform converts media time to local time
-       or Tungsten time. The values for target are defined in AudioTrack.h */
-    virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
-                                              int target) = 0;
-
     /* Send parameters to the audio hardware */
     virtual status_t    setParameters(const String8& keyValuePairs) = 0;
 
diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h
index ea316de..a4bfaf8 100644
--- a/include/media/ICrypto.h
+++ b/include/media/ICrypto.h
@@ -46,11 +46,18 @@
 
     virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId) = 0;
 
+    enum DestinationType {
+        kDestinationTypeVmPointer,    // non-secure
+        kDestinationTypeOpaqueHandle, // secure
+        kDestinationTypeNativeHandle  // secure
+    };
+
     virtual ssize_t decrypt(
-            bool secure,
+            DestinationType dstType,
             const uint8_t key[16],
             const uint8_t iv[16],
             CryptoPlugin::Mode mode,
+            const CryptoPlugin::Pattern &pattern,
             const sp<IMemory> &sharedBuffer, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
             void *dstPtr,
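
Callers of decrypt() now pass a DestinationType where they previously
passed a bool. A hypothetical migration helper (name illustrative; secure
callers that use native handles would pick kDestinationTypeNativeHandle):

    static ICrypto::DestinationType destinationFor(bool secure) {
        return secure ? ICrypto::kDestinationTypeOpaqueHandle
                      : ICrypto::kDestinationTypeVmPointer;
    }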
diff --git a/include/media/IDataSource.h b/include/media/IDataSource.h
index 07e46f7..655f337 100644
--- a/include/media/IDataSource.h
+++ b/include/media/IDataSource.h
@@ -20,10 +20,12 @@
 #include <binder/IInterface.h>
 #include <media/stagefright/foundation/ABase.h>
 #include <utils/Errors.h>
+#include <utils/String8.h>
 
 namespace android {
 
 class IMemory;
+class DecryptHandle;
 
 // A binder interface for implementing a stagefright DataSource remotely.
 class IDataSource : public IInterface {
@@ -41,6 +43,13 @@
     // This should be called before deleting |this|. The other methods may
     // return errors if they're called after calling close().
     virtual void close() = 0;
+    // Get the flags of the source.
+    // Refer to DataSource::Flags for the definition of the flags.
+    virtual uint32_t getFlags() = 0;
+    // Get a description of the source, e.g. the URL or filename it is based on.
+    virtual String8 toString() = 0;
+    // Initialize DRM and return a DecryptHandle.
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime) = 0;
 
 private:
     DISALLOW_EVIL_CONSTRUCTORS(IDataSource);
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
index 9449beb6..fd51fd0 100644
--- a/include/media/IDrm.h
+++ b/include/media/IDrm.h
@@ -71,8 +71,6 @@
                                               Vector<uint8_t> &certificate,
                                               Vector<uint8_t> &wrappedKey) = 0;
 
-    virtual status_t unprovisionDevice() = 0;
-
     virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) = 0;
     virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) = 0;
 
diff --git a/include/media/IMediaCodecService.h b/include/media/IMediaCodecService.h
new file mode 100644
index 0000000..984a0fd
--- /dev/null
+++ b/include/media/IMediaCodecService.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIACODECSERVICE_H
+#define ANDROID_IMEDIACODECSERVICE_H
+
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <media/IDataSource.h>
+#include <include/OMX.h>
+
+namespace android {
+
+class IMediaCodecService: public IInterface
+{
+public:
+    DECLARE_META_INTERFACE(MediaCodecService);
+
+    virtual sp<IOMX> getOMX() = 0;
+};
+
+class BnMediaCodecService: public BnInterface<IMediaCodecService>
+{
+public:
+    virtual status_t    onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+                                uint32_t flags = 0);
+};
+
+}   // namespace android
+
+#endif  // ANDROID_IMEDIACODECSERVICE_H
diff --git a/include/media/IMediaDrmService.h b/include/media/IMediaDrmService.h
new file mode 100644
index 0000000..323fae5
--- /dev/null
+++ b/include/media/IMediaDrmService.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIADRMSERVICE_H
+#define ANDROID_IMEDIADRMSERVICE_H
+
+#include <utils/Errors.h>  // for status_t
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+struct ICrypto;
+struct IDrm;
+
+class IMediaDrmService: public IInterface
+{
+public:
+    DECLARE_META_INTERFACE(MediaDrmService);
+
+    virtual sp<ICrypto>         makeCrypto() = 0;
+    virtual sp<IDrm>            makeDrm() = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnMediaDrmService: public BnInterface<IMediaDrmService>
+{
+public:
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IMEDIADRMSERVICE_H
diff --git a/include/media/IMediaExtractor.h b/include/media/IMediaExtractor.h
new file mode 100644
index 0000000..34b15e9
--- /dev/null
+++ b/include/media/IMediaExtractor.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef IMEDIA_EXTRACTOR_BASE_H_
+
+#define IMEDIA_EXTRACTOR_BASE_H_
+
+#include <media/IMediaSource.h>
+#include <media/stagefright/DataSource.h>
+
+namespace android {
+
+class MetaData;
+
+class IMediaExtractor : public IInterface {
+public:
+    DECLARE_META_INTERFACE(MediaExtractor);
+
+    virtual size_t countTracks() = 0;
+    virtual sp<IMediaSource> getTrack(size_t index) = 0;
+
+    enum GetTrackMetaDataFlags {
+        kIncludeExtensiveMetaData = 1
+    };
+    virtual sp<MetaData> getTrackMetaData(
+            size_t index, uint32_t flags = 0) = 0;
+
+    // Return container specific meta-data. The default implementation
+    // returns an empty metadata object.
+    virtual sp<MetaData> getMetaData() = 0;
+
+    enum Flags {
+        CAN_SEEK_BACKWARD  = 1,  // the "seek 10secs back button"
+        CAN_SEEK_FORWARD   = 2,  // the "seek 10secs forward button"
+        CAN_PAUSE          = 4,
+        CAN_SEEK           = 8,  // the "seek bar"
+    };
+
+    // If subclasses do _not_ override this, the default is
+    // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
+    virtual uint32_t flags() const = 0;
+
+    // for DRM
+    virtual void setDrmFlag(bool flag) = 0;
+    virtual bool getDrmFlag() = 0;
+    virtual char* getDrmTrackInfo(size_t trackID, int *len)  = 0;
+    virtual void setUID(uid_t uid)  = 0;
+
+    virtual const char * name() = 0;
+};
+
+
+class BnMediaExtractor: public BnInterface<IMediaExtractor>
+{
+public:
+    virtual status_t    onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+                                uint32_t flags = 0);
+};
+
+void registerMediaExtractor(
+        const sp<IMediaExtractor> &extractor,
+        const sp<DataSource> &source,
+        const char *mime);
+
+void registerMediaSource(
+        const sp<IMediaExtractor> &extractor,
+        const sp<IMediaSource> &source);
+
+status_t dumpExtractors(int fd, const Vector<String16>& args);
+
+
+}  // namespace android
+
+#endif  // IMEDIA_EXTRACTOR_BASE_H_
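
A rough consumer-side sketch for the new interface, assuming `extractor`
is an sp<IMediaExtractor> obtained over binder:

    // Enumerate tracks, fetching extended per-track metadata for each.
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(
                i, IMediaExtractor::kIncludeExtensiveMetaData);
        sp<IMediaSource> track = extractor->getTrack(i);
        // inspect meta and read from track as needed
    }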
diff --git a/include/media/IMediaExtractorService.h b/include/media/IMediaExtractorService.h
new file mode 100644
index 0000000..4d7b317
--- /dev/null
+++ b/include/media/IMediaExtractorService.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIAEXTRACTORSERVICE_H
+#define ANDROID_IMEDIAEXTRACTORSERVICE_H
+
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <media/IDataSource.h>
+#include <media/IMediaExtractor.h>
+
+namespace android {
+
+class IMediaExtractorService: public IInterface
+{
+public:
+    DECLARE_META_INTERFACE(MediaExtractorService);
+
+    virtual sp<IMediaExtractor> makeExtractor(const sp<IDataSource> &source, const char *mime) = 0;
+
+};
+
+class BnMediaExtractorService: public BnInterface<IMediaExtractorService>
+{
+public:
+    virtual status_t    onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+                                uint32_t flags = 0);
+};
+
+}   // namespace android
+
+#endif  // ANDROID_IMEDIAEXTRACTORSERVICE_H
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index a316ce2..8266b0b 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -31,8 +31,6 @@
 
 namespace android {
 
-struct ICrypto;
-struct IDrm;
 struct IHDCP;
 struct IMediaCodecList;
 struct IMediaHTTPService;
@@ -49,12 +47,9 @@
 
     virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName) = 0;
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
-    virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0)
-            = 0;
-
+    virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client,
+            audio_session_t audioSessionId = AUDIO_SESSION_ALLOCATE) = 0;
     virtual sp<IOMX>            getOMX() = 0;
-    virtual sp<ICrypto>         makeCrypto() = 0;
-    virtual sp<IDrm>            makeDrm() = 0;
     virtual sp<IHDCP>           makeHDCP(bool createEncryptionModule) = 0;
     virtual sp<IMediaCodecList> getCodecList() const = 0;
 
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 77ed5d3..68a65f0 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -23,7 +23,9 @@
 namespace android {
 
 class Surface;
+namespace hardware {
 class ICamera;
+}
 class ICameraRecordingProxy;
 class IMediaRecorderClient;
 class IGraphicBufferConsumer;
@@ -34,7 +36,7 @@
 public:
     DECLARE_META_INTERFACE(MediaRecorder);
 
-    virtual status_t setCamera(const sp<ICamera>& camera,
+    virtual status_t setCamera(const sp<hardware::ICamera>& camera,
                                const sp<ICameraRecordingProxy>& proxy) = 0;
     virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
     virtual status_t setVideoSource(int vs) = 0;
@@ -53,6 +55,8 @@
     virtual status_t start() = 0;
     virtual status_t stop() = 0;
     virtual status_t reset() = 0;
+    virtual status_t pause() = 0;
+    virtual status_t resume() = 0;
     virtual status_t init() = 0;
     virtual status_t close() = 0;
     virtual status_t release() = 0;
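
A sketch of the new pause()/resume() pair, assuming `recorder` is a
prepared sp<IMediaRecorder>:

    recorder->start();
    recorder->pause();    // suspend capture without tearing down the session
    recorder->resume();   // continue appending to the same output
    recorder->stop();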
diff --git a/include/media/IMediaSource.h b/include/media/IMediaSource.h
new file mode 100644
index 0000000..709f425
--- /dev/null
+++ b/include/media/IMediaSource.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef IMEDIA_SOURCE_BASE_H_
+
+#define IMEDIA_SOURCE_BASE_H_
+
+#include <binder/IInterface.h>
+#include <media/stagefright/MediaErrors.h>
+
+namespace android {
+
+struct MediaSource;
+class MetaData;
+class MediaBuffer;
+class MediaBufferGroup;
+
+class IMediaSource : public IInterface {
+public:
+    DECLARE_META_INTERFACE(MediaSource);
+
+    enum {
+        // Maximum number of buffers that can be read in readMultiple.
+        kMaxNumReadMultiple = 128,
+    };
+
+    // To be called before any other methods on this object, except
+    // getFormat().
+    virtual status_t start(MetaData *params = NULL) = 0;
+
+    // Any blocking read call returns immediately with a result of NO_INIT.
+    // It is an error to call any methods other than start after this call
+    // returns. Any buffers the object may be holding onto at the time of
+    // the stop() call are released.
+    // Also, it is imperative that any buffers output by this object and
+    // held onto by callers be released before a call to stop() !!!
+    virtual status_t stop() = 0;
+
+    // Returns the format of the data output by this media source.
+    virtual sp<MetaData> getFormat() = 0;
+
+    // Options that modify read() behaviour. The default is to
+    // a) not request a seek
+    // b) not be late, i.e. lateness_us = 0
+    struct ReadOptions {
+        enum SeekMode {
+            SEEK_PREVIOUS_SYNC,
+            SEEK_NEXT_SYNC,
+            SEEK_CLOSEST_SYNC,
+            SEEK_CLOSEST,
+        };
+
+        ReadOptions();
+
+        // Reset everything back to defaults.
+        void reset();
+
+        void setSeekTo(int64_t time_us, SeekMode mode = SEEK_CLOSEST_SYNC);
+        void clearSeekTo();
+        bool getSeekTo(int64_t *time_us, SeekMode *mode) const;
+
+        void setLateBy(int64_t lateness_us);
+        int64_t getLateBy() const;
+
+        void setNonBlocking();
+        void clearNonBlocking();
+        bool getNonBlocking() const;
+
+    private:
+        enum Options {
+            kSeekTo_Option      = 1,
+        };
+
+        uint32_t mOptions;
+        int64_t mSeekTimeUs;
+        SeekMode mSeekMode;
+        int64_t mLatenessUs;
+        bool mNonBlocking;
+    };
+
+    // Returns a new buffer of data. Call blocks until a
+    // buffer is available, an error is encountered or the end of the stream
+    // is reached.
+    // End of stream is signalled by a result of ERROR_END_OF_STREAM.
+    // A result of INFO_FORMAT_CHANGED indicates that the format of this
+    // MediaSource has changed mid-stream, the client can continue reading
+    // but should be prepared for buffers of the new configuration.
+    virtual status_t read(
+            MediaBuffer **buffer, const ReadOptions *options = NULL) = 0;
+
+    // Returns a vector of new buffers of data. The vector size could be
+    // <= |maxNumBuffers|. Used for buffers with small size
+    // since all buffer data are passed back by binder, not shared memory.
+    // Call blocks until an error is encountered, or the end of the stream is
+    // reached, or format change is hit, or |kMaxNumReadMultiple| buffers have
+    // been read.
+    // End of stream is signalled by a result of ERROR_END_OF_STREAM.
+    // A result of INFO_FORMAT_CHANGED indicates that the format of this
+    // MediaSource has changed mid-stream, the client can continue reading
+    // but should be prepared for buffers of the new configuration.
+    virtual status_t readMultiple(
+            Vector<MediaBuffer *> *buffers, uint32_t maxNumBuffers = 1) = 0;
+
+    // Causes this source to suspend pulling data from its upstream source
+    // until a subsequent read-with-seek. Currently only supported by
+    // OMXCodec.
+    virtual status_t pause()  = 0;
+
+    // The consumer of this media source requests that the given buffers
+    // are to be returned exclusively in response to read calls.
+    // This will be called after a successful start() and before the
+    // first read() call.
+    // Callee assumes ownership of the buffers if no error is returned.
+    virtual status_t setBuffers(const Vector<MediaBuffer *> & /* buffers */) = 0;
+
+};
+
+class BnMediaSource: public BnInterface<IMediaSource>
+{
+public:
+    BnMediaSource();
+
+    virtual status_t    onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+                                uint32_t flags = 0);
+
+    virtual status_t pause() {
+        return ERROR_UNSUPPORTED;
+    }
+
+    virtual status_t setBuffers(const Vector<MediaBuffer *> & /* buffers */) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    virtual status_t readMultiple(
+            Vector<MediaBuffer *> * /* buffers */, uint32_t /* maxNumBuffers = 1 */) {
+        return ERROR_UNSUPPORTED;
+    }
+protected:
+    virtual ~BnMediaSource();
+
+private:
+    MediaBufferGroup *mGroup;
+};
+
+
+}  // namespace android
+
+#endif  // IMEDIA_SOURCE_BASE_H_
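
A consumer-side sketch of readMultiple(), assuming `source` is a started
sp<IMediaSource>:

    // Read in batches of up to 16 buffers (capped by kMaxNumReadMultiple).
    // Buffer data comes back over binder, so this suits small buffers;
    // each returned MediaBuffer must be released by the caller.
    Vector<MediaBuffer *> buffers;
    status_t err;
    while ((err = source->readMultiple(&buffers, 16)) == NO_ERROR) {
        for (size_t i = 0; i < buffers.size(); ++i) {
            // ... consume buffers[i] ...
            buffers[i]->release();
        }
        buffers.clear();
    }
    // err ends as ERROR_END_OF_STREAM at EOS, or INFO_FORMAT_CHANGED on a
    // mid-stream format change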
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 27ad694..15d691f 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -37,6 +37,7 @@
 class IMemory;
 class IOMXObserver;
 class IOMXRenderer;
+class NativeHandle;
 class Surface;
 
 class IOMX : public IInterface {
@@ -59,6 +60,7 @@
 
     virtual status_t allocateNode(
             const char *name, const sp<IOMXObserver> &observer,
+            sp<IBinder> *nodeBinder,
             node_id *node) = 0;
 
     virtual status_t freeNode(node_id node) = 0;
@@ -98,8 +100,8 @@
             node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
             OMX_U32 audioHwSync, native_handle_t **sidebandHandle) = 0;
 
-    virtual status_t enableGraphicBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0;
+    virtual status_t enableNativeBuffers(
+            node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) = 0;
 
     virtual status_t getGraphicBufferUsage(
             node_id node, OMX_U32 port_index, OMX_U32* usage) = 0;
@@ -117,10 +119,14 @@
             node_id node, OMX_U32 port_index,
             const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) = 0;
 
+    virtual status_t updateNativeHandleInMeta(
+            node_id node, OMX_U32 port_index,
+            const sp<NativeHandle> &nativeHandle, buffer_id buffer) = 0;
+
     // This will set *type to resulting metadata buffer type on OMX error (not on binder error) as
     // well as on success.
     virtual status_t createInputSurface(
-            node_id node, OMX_U32 port_index,
+            node_id node, OMX_U32 port_index, android_dataspace dataSpace,
             sp<IGraphicBufferProducer> *bufferProducer,
             MetadataBufferType *type = NULL) = 0;
 
@@ -137,13 +143,14 @@
 
     virtual status_t signalEndOfInputStream(node_id node) = 0;
 
-    // This API clearly only makes sense if the caller lives in the
-    // same process as the callee, i.e. is the media_server, as the
-    // returned "buffer_data" pointer is just that, a pointer into local
-    // address space.
-    virtual status_t allocateBuffer(
+    // Allocate an opaque buffer as a native handle. If component supports returning native
+    // handles, those are returned in *native_handle. Otherwise, the allocated buffer is
+    // returned in *buffer_data. This clearly only makes sense if the caller lives in the
+    // same process as the callee, i.e. is the media_server, as the returned "buffer_data"
+    // pointer is just that, a pointer into local address space.
+    virtual status_t allocateSecureBuffer(
             node_id node, OMX_U32 port_index, size_t size,
-            buffer_id *buffer, void **buffer_data) = 0;
+            buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) = 0;
 
     // Allocate an OMX buffer of size |allotedSize|. Use |params| as the backup buffer, which
     // may be larger.
@@ -184,6 +191,7 @@
         INTERNAL_OPTION_MAX_FPS, // data is float
         INTERNAL_OPTION_START_TIME, // data is an int64_t
         INTERNAL_OPTION_TIME_LAPSE, // data is an int64_t[2]
+        INTERNAL_OPTION_COLOR_ASPECTS, // data is ColorAspects
     };
     virtual status_t setInternalOption(
             node_id node,
@@ -269,17 +277,18 @@
     OMX_U32 mLevel;
 };
 
-}  // namespace android
-
-inline static const char *asString(android::MetadataBufferType i, const char *def = "??") {
+inline static const char *asString(MetadataBufferType i, const char *def = "??") {
     using namespace android;
     switch (i) {
         case kMetadataBufferTypeCameraSource:   return "CameraSource";
         case kMetadataBufferTypeGrallocSource:  return "GrallocSource";
         case kMetadataBufferTypeANWBuffer:      return "ANWBuffer";
+        case kMetadataBufferTypeNativeHandleSource: return "NativeHandleSource";
         case kMetadataBufferTypeInvalid:        return "Invalid";
         default:                                return def;
     }
 }
 
+}  // namespace android
+
 #endif  // ANDROID_IOMX_H_
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
index 4067b47..48d0407 100644
--- a/include/media/MediaCodecInfo.h
+++ b/include/media/MediaCodecInfo.h
@@ -33,7 +33,6 @@
 
 struct AMessage;
 class Parcel;
-struct CodecCapabilities;
 
 typedef KeyedVector<AString, AString> CodecSettings;
 
@@ -44,12 +43,23 @@
     };
 
     struct Capabilities : public RefBase {
+        enum {
+            // decoder flags
+            kFlagSupportsAdaptivePlayback = 1 << 0,
+            kFlagSupportsSecurePlayback = 1 << 1,
+            kFlagSupportsTunneledPlayback = 1 << 2,
+
+            // encoder flags
+            kFlagSupportsIntraRefresh = 1 << 0,
+
+        };
+
         void getSupportedProfileLevels(Vector<ProfileLevel> *profileLevels) const;
         void getSupportedColorFormats(Vector<uint32_t> *colorFormats) const;
         uint32_t getFlags() const;
         const sp<AMessage> getDetails() const;
 
-    private:
+    protected:
         Vector<ProfileLevel> mProfileLevels;
         Vector<uint32_t> mColorFormats;
         uint32_t mFlags;
@@ -57,6 +67,7 @@
 
         Capabilities();
 
+    private:
         // read object from parcel even if object creation fails
         static sp<Capabilities> FromParcel(const Parcel &parcel);
         status_t writeToParcel(Parcel *parcel) const;
@@ -66,6 +77,14 @@
         friend class MediaCodecInfo;
     };
 
+    // Use a subclass to allow setting fields on construction without exposing
+    // the same setters throughout the framework.
+    struct CapabilitiesBuilder : public Capabilities {
+        void addProfileLevel(uint32_t profile, uint32_t level);
+        void addColorFormat(uint32_t format);
+        void addFlags(uint32_t flags);
+    };
+
     bool isEncoder() const;
     bool hasQuirk(const char *name) const;
     void getSupportedMimes(Vector<AString> *mimes) const;
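
A sketch of how the builder subclass might populate capabilities during
construction (the profile, level, and color-format values are illustrative):

    MediaCodecInfo::CapabilitiesBuilder builder;
    builder.addProfileLevel(1 /* profile */, 2048 /* level */);
    builder.addColorFormat(19 /* an OMX_COLOR_FORMATTYPE value */);
    builder.addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback);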
@@ -107,7 +126,8 @@
     void addQuirk(const char *name);
     status_t addMime(const char *mime);
     status_t updateMime(const char *mime);
-    status_t initializeCapabilities(const CodecCapabilities &caps);
+
+    status_t initializeCapabilities(const sp<Capabilities> &caps);
     void addDetail(const AString &key, const AString &value);
     void addFeature(const AString &key, int32_t value);
     void addFeature(const AString &key, const char *value);
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index de82554..4977efd 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -99,10 +99,12 @@
         virtual float       msecsPerFrame() const = 0;
         virtual status_t    getPosition(uint32_t *position) const = 0;
         virtual status_t    getTimestamp(AudioTimestamp &ts) const = 0;
+        virtual int64_t     getPlayedOutDurationUs(int64_t nowUs) const = 0;
         virtual status_t    getFramesWritten(uint32_t *frameswritten) const = 0;
-        virtual int         getSessionId() const = 0;
+        virtual audio_session_t getSessionId() const = 0;
         virtual audio_stream_type_t getAudioStreamType() const = 0;
         virtual uint32_t    getSampleRate() const = 0;
+        virtual int64_t     getBufferDurationInUs() const = 0;
 
         // If no callback is specified, use the "write" API below to submit
         // audio data.
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index d6cc4bb..5195993 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -42,7 +42,7 @@
     virtual status_t setVideoEncoder(video_encoder ve) = 0;
     virtual status_t setVideoSize(int width, int height) = 0;
     virtual status_t setVideoFrameRate(int frames_per_second) = 0;
-    virtual status_t setCamera(const sp<ICamera>& camera,
+    virtual status_t setCamera(const sp<hardware::ICamera>& camera,
                                const sp<ICameraRecordingProxy>& proxy) = 0;
     virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
     virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
@@ -53,6 +53,8 @@
     virtual status_t prepare() = 0;
     virtual status_t start() = 0;
     virtual status_t stop() = 0;
+    virtual status_t pause() = 0;
+    virtual status_t resume() = 0;
     virtual status_t close() = 0;
     virtual status_t reset() = 0;
     virtual status_t getMaxAmplitude(int *max) = 0;
diff --git a/include/media/MediaResource.h b/include/media/MediaResource.h
index 20f2cad..1957a45 100644
--- a/include/media/MediaResource.h
+++ b/include/media/MediaResource.h
@@ -23,17 +23,24 @@
 
 namespace android {
 
-extern const char kResourceSecureCodec[];
-extern const char kResourceNonSecureCodec[];
-extern const char kResourceAudioCodec[];
-extern const char kResourceVideoCodec[];
-extern const char kResourceGraphicMemory[];
-
 class MediaResource {
 public:
+    enum Type {
+        kUnspecified = 0,
+        kSecureCodec,
+        kNonSecureCodec,
+        kGraphicMemory
+    };
+
+    enum SubType {
+        kUnspecifiedSubType = 0,
+        kAudioCodec,
+        kVideoCodec
+    };
+
     MediaResource();
-    MediaResource(String8 type, uint64_t value);
-    MediaResource(String8 type, String8 subType, uint64_t value);
+    MediaResource(Type type, uint64_t value);
+    MediaResource(Type type, SubType subType, uint64_t value);
 
     void readFromParcel(const Parcel &parcel);
     void writeToParcel(Parcel *parcel) const;
@@ -43,11 +50,30 @@
     bool operator==(const MediaResource &other) const;
     bool operator!=(const MediaResource &other) const;
 
-    String8 mType;
-    String8 mSubType;
+    Type mType;
+    SubType mSubType;
     uint64_t mValue;
 };
 
+inline static const char *asString(MediaResource::Type i, const char *def = "??") {
+    switch (i) {
+        case MediaResource::kUnspecified:    return "unspecified";
+        case MediaResource::kSecureCodec:    return "secure-codec";
+        case MediaResource::kNonSecureCodec: return "non-secure-codec";
+        case MediaResource::kGraphicMemory:  return "graphic-memory";
+        default:                             return def;
+    }
+}
+
+inline static const char *asString(MediaResource::SubType i, const char *def = "??") {
+    switch (i) {
+        case MediaResource::kUnspecifiedSubType: return "unspecified";
+        case MediaResource::kAudioCodec:         return "audio-codec";
+        case MediaResource::kVideoCodec:         return "video-codec";
+        default:                                 return def;
+    }
+}
+
 }; // namespace android
 
 #endif  // ANDROID_MEDIA_RESOURCE_H
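
A small sketch of the new typed resource values, which replace the old
string-keyed kResource* constants:

    // One secure video codec instance plus 64 MiB of graphic memory.
    MediaResource codec(MediaResource::kSecureCodec,
                        MediaResource::kVideoCodec, 1);
    MediaResource gmem(MediaResource::kGraphicMemory, 64 * 1024 * 1024);
    // asString() yields readable names, e.g. "secure-codec" / "video-codec".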
diff --git a/include/media/MemoryLeakTrackUtil.h b/include/media/MemoryLeakTrackUtil.h
index d2618aa..4c1a60c 100644
--- a/include/media/MemoryLeakTrackUtil.h
+++ b/include/media/MemoryLeakTrackUtil.h
@@ -16,11 +16,16 @@
 #ifndef MEMORY_LEAK_TRACK_UTIL_H
 #define MEMORY_LEAK_TRACK_UTIL_H
 
+#include <iostream>
+
 namespace android {
 /*
- * Dump the memory address of the calling process to the given fd.
+ * Dump the heap memory of the calling process, sorted by total size
+ * (allocation size * number of allocations).
+ *
+ *    limit is the number of unique allocations to return.
  */
-extern void dumpMemoryAddresses(int fd);
+extern std::string dumpMemoryAddresses(size_t limit);
 
 };
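
The helper now returns a std::string instead of writing to a file
descriptor, so a caller that previously passed an fd would do something
like (assuming <unistd.h> for write()):

    std::string s = dumpMemoryAddresses(100 /* top 100 unique allocations */);
    write(fd, s.c_str(), s.size());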
 
diff --git a/include/media/Modulo.h b/include/media/Modulo.h
new file mode 100644
index 0000000..23280ac
--- /dev/null
+++ b/include/media/Modulo.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MODULO_H
+#define ANDROID_MODULO_H
+
+#include <type_traits>  // std::make_signed / std::make_unsigned
+
+namespace android {
+
+// Modulo class is used for intentionally wrapping variables such as
+// counters and timers.
+//
+// It may also be used for variables whose computation depends on the
+// associativity of addition or subtraction.
+//
+// Features:
+// 1) Modulo checks type sizes before performing operations to ensure
+//    that the wrap points match. This is critical for safe modular arithmetic.
+// 2) Modulo returns Modulo types from arithmetic operations, thereby
+//    avoiding unintentional use in a non-modular computation.  A Modulo
+//    type is converted to its base non-Modulo type through the value() function.
+// 3) Modulo separates out overflowable types from non-overflowable types.
+//    A signed overflow is technically undefined in C and C++.
+//    Modulo types do not participate in sanitization.
+// 4) Modulo comparisons are based on signed differences to account for wrap;
+//    this is not the same as the direct comparison of values.
+// 5) Safe use of binary arithmetic operations relies on conversions of
+//    signed operands to unsigned operands (which are modular arithmetic safe).
+//    Conversions which are implementation-defined are assumed to use 2's complement
+//    representation. (See A, B, C, D from the ISO/IEC FDIS 14882
+//    Information technology — Programming languages — C++).
+//
+// A: ISO/IEC 14882:2011(E) p84 section 4.7 Integral conversions
+// (2) If the destination type is unsigned, the resulting value is the least unsigned
+// integer congruent to the source integer (modulo 2^n where n is the number of bits
+// used to represent the unsigned type). [ Note: In a two’s complement representation,
+// this conversion is conceptual and there is no change in the bit pattern (if there
+// is no truncation). — end note ]
+// (3) If the destination type is signed, the value is unchanged if it can be represented
+// in the destination type (and bit-field width); otherwise, the value is
+// implementation-defined.
+//
+// B: ISO/IEC 14882:2011(E) p88 section 5 Expressions
+// (9) Many binary operators that expect operands of arithmetic or enumeration type
+// cause conversions and yield result types in a similar way. The purpose is to
+// yield a common type, which is also the type of the result. This pattern is called
+// the usual arithmetic conversions, which are defined as follows:
+// [...]
+// Otherwise, if both operands have signed integer types or both have unsigned
+// integer types, the operand with the type of lesser integer conversion rank shall be
+// converted to the type of the operand with greater rank.
+// — Otherwise, if the operand that has unsigned integer type has rank greater than
+// or equal to the rank of the type of the other operand, the operand with signed
+// integer type shall be converted to the type of the operand with unsigned integer type.
+//
+// C: ISO/IEC 14882:2011(E) p86 section 4.13 Integer conversion rank
+// [...] The rank of long long int shall be greater than the rank of long int,
+// which shall be greater than the rank of int, which shall be greater than the
+// rank of short int, which shall be greater than the rank of signed char.
+// — The rank of any unsigned integer type shall equal the rank of the corresponding
+// signed integer type.
+//
+// D: ISO/IEC 14882:2011(E) p75 section 3.9.1 Fundamental types
+// [...] Unsigned integers, declared unsigned, shall obey the laws of arithmetic modulo
+// 2^n where n is the number of bits in the value representation of that particular
+// size of integer.
+//
+// Note:
+// Other libraries do exist for safe integer operations which can detect the
+// possibility of overflow (SafeInt from MS and safe-iop in android).
+// Signed safe computation is also possible from the art header safe_math.h.
+
+template <typename T> class Modulo {
+    T mValue;
+
+public:
+    typedef typename std::make_signed<T>::type signedT;
+    typedef typename std::make_unsigned<T>::type unsignedT;
+
+    Modulo() { } // intentionally uninitialized data
+    Modulo(const T &value) { mValue = value; }
+    const T & value() const { return mValue; } // not assignable
+    signedT signedValue() const { return mValue; }
+    unsignedT unsignedValue() const { return mValue; }
+    void getValue(T *value) const { *value = mValue; } // more type safe than value()
+
+    // modular operations valid only if size of T <= size of S.
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    Modulo<T> operator +=(const Modulo<S> &other) {
+        static_assert(sizeof(T) <= sizeof(S), "argument size mismatch");
+        mValue += other.unsignedValue();
+        return *this;
+    }
+
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    Modulo<T> operator -=(const Modulo<S> &other) {
+        static_assert(sizeof(T) <= sizeof(S), "argument size mismatch");
+        mValue -= other.unsignedValue();
+        return *this;
+    }
+
+    // modular operations resulting in a value valid only at the smaller of the two
+    // Modulo base type sizes, but we only allow equal sizes to avoid confusion.
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    const Modulo<T> operator +(const Modulo<S> &other) const {
+        static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+        return Modulo<T>(mValue + other.unsignedValue());
+    }
+
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    const Modulo<T> operator -(const Modulo<S> &other) const {
+        static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+        return Modulo<T>(mValue - other.unsignedValue());
+    }
+
+    // modular operations that should be checked only at the smaller of
+    // the two type sizes, but we only allow equal sizes to avoid confusion.
+    //
+    // Caution: These relational and comparison operations are not equivalent to
+    // the base type operations.
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    bool operator >(const Modulo<S> &other) const {
+        static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+        return static_cast<signedT>(mValue - other.unsignedValue()) > 0;
+    }
+
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    bool operator >=(const Modulo<S> &other) const {
+        static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+        return static_cast<signedT>(mValue - other.unsignedValue()) >= 0;
+    }
+
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    bool operator ==(const Modulo<S> &other) const {
+        static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+        return static_cast<signedT>(mValue - other.unsignedValue()) == 0;
+    }
+
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    bool operator <=(const Modulo<S> &other) const {
+        static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+        return static_cast<signedT>(mValue - other.unsignedValue()) <= 0;
+    }
+
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    bool operator <(const Modulo<S> &other) const {
+        static_assert(sizeof(T) == sizeof(S), "argument size mismatch");
+        return static_cast<signedT>(mValue - other.unsignedValue()) < 0;
+    }
+
+
+    // modular operations with a non-Modulo type allowed with wrapping
+    // because there should be no confusion as to the meaning.
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    Modulo<T> operator +=(const S &other) {
+        mValue += unsignedT(other);
+        return *this;
+    }
+
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    Modulo<T> operator -=(const S &other) {
+        mValue -= unsignedT(other);
+        return *this;
+    }
+
+    // modular operations with a non-Modulo type allowed with wrapping,
+    // but we restrict this only when size of T is greater than or equal to
+    // the size of S to avoid confusion with the nature of overflow.
+    //
+    // Use of this follows left-associative style.
+    //
+    // Note: a Modulo type may be promoted by using "differences" off of
+    // a larger sized type, but we do not automate this.
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    const Modulo<T> operator +(const S &other) const {
+        static_assert(sizeof(T) >= sizeof(S), "argument size mismatch");
+        return Modulo<T>(mValue + unsignedT(other));
+    }
+
+    template <typename S>
+    __attribute__((no_sanitize("integer")))
+    const Modulo<T> operator -(const S &other) const {
+        static_assert(sizeof(T) >= sizeof(S), "argument size mismatch");
+        return Modulo<T>(mValue - unsignedT(other));
+    }
+
+    // multiply is intentionally omitted, but it is a common operator in
+    // modular arithmetic.
+
+    // shift operations are intentionally omitted, but perhaps useful.
+    // For example, left-shifting a negative number is undefined in C++11.
+};
+
+} // namespace android
+
+#endif /* ANDROID_MODULO_H */
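
A minimal sketch (not part of the change) of the wrap-safe comparison the class
comment describes: a raw value comparison inverts at the wrap point, while the
Modulo operators compare via the signed difference and stay correct:

    #include <stdint.h>
    #include <media/Modulo.h>

    using android::Modulo;

    static void example() {
        Modulo<uint32_t> last(0xFFFFFFF0u);
        Modulo<uint32_t> seq = last + 0x20u;      // wraps past UINT32_MAX to 0x10
        bool newer = seq > last;                  // true: signed difference is +0x20
        bool naive = seq.value() > last.value();  // false: direct comparison misorders
        (void)newer; (void)naive;
    }
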
diff --git a/media/libmediaplayerservice/SharedLibrary.h b/include/media/SharedLibrary.h
similarity index 100%
rename from media/libmediaplayerservice/SharedLibrary.h
rename to include/media/SharedLibrary.h
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index 8406ed6..c41c686 100644
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -193,12 +193,15 @@
         TONE_JAPAN_DIAL,            // Dial tone: 400Hz, continuous
         TONE_JAPAN_BUSY,            // Busy tone: 400Hz, 500ms ON, 500ms OFF...
         TONE_JAPAN_RADIO_ACK,       // Radio path acknowledgment: 400Hz, 1s ON, 2s OFF...
+        // UK Supervisory tones
+        TONE_UK_RINGTONE,           // Ring Tone: A 400Hz + 450Hz tone repeated in a 0.4s on, 0.2s off, 0.4s on, 2.0s off pattern.
         NUM_ALTERNATE_TONES
     };
 
     enum region {
         ANSI,
         JAPAN,
+        UK,
         CEPT,
         NUM_REGIONS
     };
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index 186e018..ec0dad5 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -69,7 +69,7 @@
                                    int32_t priority = 0,
                                    effect_callback_t cbf = NULL,
                                    void* user = NULL,
-                                   int sessionId = 0);
+                                   audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX);
 
                         ~Visualizer();
 
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 3fe749c..cec9d99 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -215,7 +215,6 @@
                     const KeyedVector<String8, String8> *headers);
 
             status_t        setDataSource(int fd, int64_t offset, int64_t length);
-            status_t        setDataSource(const sp<IStreamSource> &source);
             status_t        setDataSource(const sp<IDataSource> &source);
             status_t        setVideoSurfaceTexture(
                                     const sp<IGraphicBufferProducer>& bufferProducer);
@@ -247,8 +246,8 @@
             status_t        invoke(const Parcel& request, Parcel *reply);
             status_t        setMetadataFilter(const Parcel& filter);
             status_t        getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
-            status_t        setAudioSessionId(int sessionId);
-            int             getAudioSessionId();
+            status_t        setAudioSessionId(audio_session_t sessionId);
+            audio_session_t getAudioSessionId();
             status_t        setAuxEffectSendLevel(float level);
             status_t        attachAuxEffect(int effectId);
             status_t        setParameter(int key, const Parcel& request);
@@ -285,7 +284,7 @@
     float                       mRightVolume;
     int                         mVideoWidth;
     int                         mVideoHeight;
-    int                         mAudioSessionId;
+    audio_session_t             mAudioSessionId;
     float                       mSendLevel;
     struct sockaddr_in          mRetransmitEndpoint;
     bool                        mRetransmitEndpointValid;
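
A small usage sketch, assuming an already-prepared player: the session id now
round-trips as audio_session_t, so it can feed audio-effect APIs without casts:

    #include <media/mediaplayer.h>

    using namespace android;

    static status_t attachAuxToPlayer(const sp<MediaPlayer> &player, int effectId) {
        audio_session_t session = player->getAudioSessionId();
        (void)session;  // e.g. hand this to an AudioEffect bound to the player's output
        return player->attachAuxEffect(effectId);
    }
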
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 15ff82d..c3f39a2 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -29,12 +29,15 @@
 
 class Surface;
 class IMediaRecorder;
-class ICamera;
 class ICameraRecordingProxy;
 class IGraphicBufferProducer;
 struct PersistentSurface;
 class Surface;
 
+namespace hardware {
+class ICamera;
+}
+
 typedef void (*media_completion_f)(status_t status, void *cookie);
 
 enum video_source {
@@ -95,6 +98,7 @@
     VIDEO_ENCODER_H264 = 2,
     VIDEO_ENCODER_MPEG_4_SP = 3,
     VIDEO_ENCODER_VP8 = 4,
+    VIDEO_ENCODER_HEVC = 5,
 
     VIDEO_ENCODER_LIST_END // must be the last - used to validate the video encoder type
 };
@@ -215,7 +219,8 @@
 
     void        died();
     status_t    initCheck();
-    status_t    setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
+    status_t    setCamera(const sp<hardware::ICamera>& camera,
+            const sp<ICameraRecordingProxy>& proxy);
     status_t    setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
     status_t    setVideoSource(int vs);
     status_t    setAudioSource(int as);
@@ -233,6 +238,8 @@
     status_t    start();
     status_t    stop();
     status_t    reset();
+    status_t    pause();
+    status_t    resume();
     status_t    init();
     status_t    close();
     status_t    release();
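
A hedged usage sketch of the new pause()/resume() controls; configuration and
start() are assumed to have happened already, and error handling is trimmed:

    #include <media/mediarecorder.h>

    using namespace android;

    static void pauseThenContinue(const sp<MediaRecorder> &recorder) {
        if (recorder->pause() == OK) {
            // ... later: continue appending to the same output file
            (void)recorder->resume();
        }
    }
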
diff --git a/include/media/nbaio/AudioBufferProviderSource.h b/include/media/nbaio/AudioBufferProviderSource.h
index b16e20a..4747dcf 100644
--- a/include/media/nbaio/AudioBufferProviderSource.h
+++ b/include/media/nbaio/AudioBufferProviderSource.h
@@ -42,9 +42,8 @@
     //virtual size_t framesOverrun();
     //virtual size_t overruns();
     virtual ssize_t availableToRead();
-    virtual ssize_t read(void *buffer, size_t count, int64_t readPTS);
-    virtual ssize_t readVia(readVia_t via, size_t total, void *user,
-                            int64_t readPTS, size_t block);
+    virtual ssize_t read(void *buffer, size_t count);
+    virtual ssize_t readVia(readVia_t via, size_t total, void *user, size_t block);
 
 private:
     AudioBufferProvider * const mProvider;
diff --git a/include/media/nbaio/AudioStreamInSource.h b/include/media/nbaio/AudioStreamInSource.h
index 5169f1e..a6e7992 100644
--- a/include/media/nbaio/AudioStreamInSource.h
+++ b/include/media/nbaio/AudioStreamInSource.h
@@ -38,14 +38,14 @@
     // NBAIO_Sink interface
 
     //virtual size_t framesRead() const;
-    virtual size_t framesOverrun();
-    virtual size_t overruns() { (void) framesOverrun(); return mOverruns; }
+    virtual int64_t framesOverrun();
+    virtual int64_t overruns() { (void) framesOverrun(); return mOverruns; }
 
     // This is an over-estimate, and could dupe the caller into making a blocking read()
     // FIXME Use an audio HAL API to query the buffer filling status when it's available.
     virtual ssize_t availableToRead() { return mStreamBufferSizeBytes / mFrameSize; }
 
-    virtual ssize_t read(void *buffer, size_t count, int64_t readPTS);
+    virtual ssize_t read(void *buffer, size_t count);
 
     // NBAIO_Sink end
 
@@ -56,8 +56,8 @@
 private:
     audio_stream_in * const mStream;
     size_t              mStreamBufferSizeBytes; // as reported by get_buffer_size()
-    size_t              mFramesOverrun;
-    size_t              mOverruns;
+    int64_t             mFramesOverrun;
+    int64_t             mOverruns;
 };
 
 }   // namespace android
diff --git a/include/media/nbaio/AudioStreamOutSink.h b/include/media/nbaio/AudioStreamOutSink.h
index 9949b88..e86b018 100644
--- a/include/media/nbaio/AudioStreamOutSink.h
+++ b/include/media/nbaio/AudioStreamOutSink.h
@@ -47,12 +47,7 @@
 
     virtual ssize_t write(const void *buffer, size_t count);
 
-    // AudioStreamOutSink wraps a HAL's output stream.  Its
-    // getNextWriteTimestamp method is simply a passthru to the HAL's underlying
-    // implementation of GNWT (if any)
-    virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
-    virtual status_t getTimestamp(AudioTimestamp& timestamp);
+    virtual status_t getTimestamp(ExtendedTimestamp &timestamp);
 
     // NBAIO_Sink end
 
diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h
index b09b35f..d2cd218 100644
--- a/include/media/nbaio/MonoPipe.h
+++ b/include/media/nbaio/MonoPipe.h
@@ -18,13 +18,12 @@
 #define ANDROID_AUDIO_MONO_PIPE_H
 
 #include <time.h>
-#include <utils/LinearTransform.h>
 #include "NBAIO.h"
 #include <media/SingleStateQueue.h>
 
 namespace android {
 
-typedef SingleStateQueue<AudioTimestamp> AudioTimestampSingleStateQueue;
+typedef SingleStateQueue<ExtendedTimestamp> ExtendedTimestampSingleStateQueue;
 
 // MonoPipe is similar to Pipe except:
 //  - supports only a single reader, called MonoPipeReader
@@ -52,28 +51,14 @@
 
     // NBAIO_Sink interface
 
-    //virtual size_t framesWritten() const;
-    //virtual size_t framesUnderrun() const;
-    //virtual size_t underruns() const;
+    //virtual int64_t framesWritten() const;
+    //virtual int64_t framesUnderrun() const;
+    //virtual int64_t underruns() const;
 
     virtual ssize_t availableToWrite() const;
     virtual ssize_t write(const void *buffer, size_t count);
     //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
 
-    // MonoPipe's implementation of getNextWriteTimestamp works in conjunction
-    // with MonoPipeReader.  Every time a MonoPipeReader reads from the pipe, it
-    // receives a "readPTS" indicating the point in time for which the reader
-    // would like to read data.  This "last read PTS" is offset by the amt of
-    // data the reader is currently mixing and then cached cached along with the
-    // updated read pointer.  This cached value is the local time for which the
-    // reader is going to request data next time it reads data (assuming we are
-    // in steady state and operating with no underflows).  Writers to the
-    // MonoPipe who would like to know when their next write operation will hit
-    // the speakers can call getNextWriteTimestamp which will return the value
-    // of the last read PTS plus the duration of the amt of data waiting to be
-    // read in the MonoPipe.
-    virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
             // average number of frames present in the pipe under normal conditions.
             // See throttling mechanism in MonoPipe::write()
             size_t  getAvgFrames() const { return mSetpoint; }
@@ -92,51 +77,29 @@
             bool    isShutdown();
 
             // Return NO_ERROR if there is a timestamp available
-            status_t getTimestamp(AudioTimestamp& timestamp);
+            status_t getTimestamp(ExtendedTimestamp &timestamp);
 
 private:
-    // A pair of methods and a helper variable which allows the reader and the
-    // writer to update and observe the values of mFront and mNextRdPTS in an
-    // atomic lock-less fashion.
-    //
-    // :: Important ::
-    // Two assumptions must be true in order for this lock-less approach to
-    // function properly on all systems.  First, there may only be one updater
-    // thread in the system.  Second, the updater thread must be running at a
-    // strictly higher priority than the observer threads.  Currently, both of
-    // these assumptions are true.  The only updater is always a single
-    // FastMixer thread (which runs with SCHED_FIFO/RT priority while the only
-    // observer is always an AudioFlinger::PlaybackThread running with
-    // traditional (non-RT) audio priority.
-    void updateFrontAndNRPTS(int32_t newFront, int64_t newNextRdPTS);
-    void observeFrontAndNRPTS(int32_t *outFront, int64_t *outNextRdPTS);
-    volatile int32_t mUpdateSeq;
-
     const size_t    mReqFrames;     // as requested in constructor, unrounded
     const size_t    mMaxFrames;     // always a power of 2
     void * const    mBuffer;
     // mFront and mRear will never be separated by more than mMaxFrames.
     // 32-bit overflow is possible if the pipe is active for a long time, but if that happens it's
     // safe because we "&" with (mMaxFrames-1) at end of computations to calculate a buffer index.
-    volatile int32_t mFront;        // written by the reader with updateFrontAndNRPTS, observed by
-                                    // the writer with observeFrontAndNRPTS
+    volatile int32_t mFront;        // written by reader with android_atomic_release_store,
+                                    // read by writer with android_atomic_acquire_load
     volatile int32_t mRear;         // written by writer with android_atomic_release_store,
                                     // read by reader with android_atomic_acquire_load
-    volatile int64_t mNextRdPTS;    // written by the reader with updateFrontAndNRPTS, observed by
-                                    // the writer with observeFrontAndNRPTS
     bool            mWriteTsValid;  // whether mWriteTs is valid
     struct timespec mWriteTs;       // time that the previous write() completed
     size_t          mSetpoint;      // target value for pipe fill depth
     const bool      mWriteCanBlock; // whether write() should block if the pipe is full
 
-    int64_t offsetTimestampByAudioFrames(int64_t ts, size_t audFrames);
-    LinearTransform mSamplesToLocalTime;
-
     bool            mIsShutdown;    // whether shutdown(true) was called, no barriers are needed
 
-    AudioTimestampSingleStateQueue::Shared      mTimestampShared;
-    AudioTimestampSingleStateQueue::Mutator     mTimestampMutator;
-    AudioTimestampSingleStateQueue::Observer    mTimestampObserver;
+    ExtendedTimestampSingleStateQueue::Shared      mTimestampShared;
+    ExtendedTimestampSingleStateQueue::Mutator     mTimestampMutator;
+    ExtendedTimestampSingleStateQueue::Observer    mTimestampObserver;
 };
 
 }   // namespace android
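
A minimal sketch (not part of the change) of polling the pipe's timestamp
through the new ExtendedTimestamp-based accessor:

    #include <media/nbaio/MonoPipe.h>

    using namespace android;

    static void pollPipeTimestamp(MonoPipe &pipe) {
        ExtendedTimestamp ts;
        if (pipe.getTimestamp(ts) == NO_ERROR) {
            // ts carries per-location frame positions plus CLOCK_MONOTONIC times,
            // rather than the single position/time pair of AudioTimestamp.
        }
    }
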
diff --git a/include/media/nbaio/MonoPipeReader.h b/include/media/nbaio/MonoPipeReader.h
index 78fe867..b3c891d 100644
--- a/include/media/nbaio/MonoPipeReader.h
+++ b/include/media/nbaio/MonoPipeReader.h
@@ -47,9 +47,9 @@
 
     virtual ssize_t availableToRead();
 
-    virtual ssize_t read(void *buffer, size_t count, int64_t readPTS);
+    virtual ssize_t read(void *buffer, size_t count);
 
-    virtual void    onTimestamp(const AudioTimestamp& timestamp);
+    virtual void    onTimestamp(const ExtendedTimestamp &timestamp);
 
     // NBAIO_Source end
 
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index d9bbc8d..120de4f 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -79,8 +79,7 @@
 
 // Callbacks used by NBAIO_Sink::writeVia() and NBAIO_Source::readVia() below.
 typedef ssize_t (*writeVia_t)(void *user, void *buffer, size_t count);
-typedef ssize_t (*readVia_t)(void *user, const void *buffer,
-                             size_t count, int64_t readPTS);
+typedef ssize_t (*readVia_t)(void *user, const void *buffer, size_t count);
 
 // Check whether an NBAIO_Format is valid
 bool Format_isValid(const NBAIO_Format& format);
@@ -146,13 +145,13 @@
     // 32 bits rolls over after 27 hours at 44.1 kHz; if that concerns you then poll periodically.
 
     // Return the number of frames written successfully since construction.
-    virtual size_t framesWritten() const { return mFramesWritten; }
+    virtual int64_t framesWritten() const { return mFramesWritten; }
 
     // Number of frames lost due to underrun since construction.
-    virtual size_t framesUnderrun() const { return 0; }
+    virtual int64_t framesUnderrun() const { return 0; }
 
     // Number of underruns since construction, where a set of contiguous lost frames is one event.
-    virtual size_t underruns() const { return 0; }
+    virtual int64_t underruns() const { return 0; }
 
     // Estimate of number of frames that could be written successfully now without blocking.
     // When a write() is actually attempted, the implementation is permitted to return a smaller or
@@ -210,25 +209,10 @@
     //  < 0     status_t error occurred prior to the first frame transfer during this callback.
     virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block = 0);
 
-    // Get the time (on the LocalTime timeline) at which the first frame of audio of the next write
-    // operation to this sink will be eventually rendered by the HAL.
-    // Inputs:
-    //  ts      A pointer pointing to the int64_t which will hold the result.
-    // Return value:
-    //  OK      Everything went well, *ts holds the time at which the first audio frame of the next
-    //          write operation will be rendered, or AudioBufferProvider::kInvalidPTS if this sink
-    //          does not know the answer for some reason.  Sinks which eventually lead to a HAL
-    //          which implements get_next_write_timestamp may return Invalid temporarily if the DMA
-    //          output of the audio driver has not started yet.  Sinks which lead to a HAL which
-    //          does not implement get_next_write_timestamp, or which don't lead to a HAL at all,
-    //          will always return kInvalidPTS.
-    //  <other> Something unexpected happened internally.  Check the logs and start debugging.
-    virtual status_t getNextWriteTimestamp(int64_t *ts) { return INVALID_OPERATION; }
-
     // Returns NO_ERROR if a timestamp is available.  The timestamp includes the total number
     // of frames presented to an external observer, together with the value of CLOCK_MONOTONIC
     // as of this presentation count.  The timestamp parameter is undefined if error is returned.
-    virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; }
+    virtual status_t getTimestamp(ExtendedTimestamp &timestamp) { return INVALID_OPERATION; }
 
 protected:
     NBAIO_Sink(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0)
@@ -236,7 +220,7 @@
     virtual ~NBAIO_Sink() { }
 
     // Implementations are free to ignore these if they don't need them
-    size_t  mFramesWritten;
+    int64_t  mFramesWritten;
 };
 
 // Abstract class (interface) representing a non-blocking data source, for use by a data consumer.
@@ -248,15 +232,15 @@
     // 32 bits rolls over after 27 hours at 44.1 kHz; if that concerns you then poll periodically.
 
     // Number of frames read successfully since construction.
-    virtual size_t framesRead() const { return mFramesRead; }
+    virtual int64_t framesRead() const { return mFramesRead; }
 
     // Number of frames lost due to overrun since construction.
     // Not const because implementations may need to do I/O.
-    virtual size_t framesOverrun() /*const*/ { return 0; }
+    virtual int64_t framesOverrun() /*const*/ { return 0; }
 
     // Number of overruns since construction, where a set of contiguous lost frames is one event.
     // Not const because implementations may need to do I/O.
-    virtual size_t overruns() /*const*/ { return 0; }
+    virtual int64_t overruns() /*const*/ { return 0; }
 
     // Estimate of number of frames that could be read successfully now.
     // When a read() is actually attempted, the implementation is permitted to return a smaller or
@@ -271,8 +255,6 @@
     // Inputs:
     //  buffer  Non-NULL destination buffer owned by consumer.
     //  count   Maximum number of frames to transfer.
-    //  readPTS The presentation time (on the LocalTime timeline) for which data
-    //          is being requested, or kInvalidPTS if not known.
     // Return value:
     //  > 0     Number of frames successfully transferred prior to first error.
     //  = 0     Count was zero.
@@ -282,7 +264,7 @@
     //  WOULD_BLOCK No frames can be transferred without blocking.
     //  OVERRUN     read() has not been called frequently enough, or with enough frames to keep up.
     //              One or more frames were lost due to overrun, try again to read more recent data.
-    virtual ssize_t read(void *buffer, size_t count, int64_t readPTS) = 0;
+    virtual ssize_t read(void *buffer, size_t count) = 0;
 
     // Transfer data from source using a series of callbacks.  More suitable for zero-fill,
     // synthesis, and non-contiguous transfers (e.g. circular buffer or readv).
@@ -291,8 +273,6 @@
     //  total   Estimate of the number of frames the consumer desires.  This is an estimate,
     //          and it can consume a different number of frames during the series of callbacks.
     //  user    Arbitrary void * reserved for data consumer.
-    //  readPTS The presentation time (on the LocalTime timeline) for which data
-    //          is being requested, or kInvalidPTS if not known.
     //  block   Number of frames per block, that is a suggested value for 'count' in each callback.
     //          Zero means no preference.  This parameter is a hint only, and may be ignored.
     // Return value:
@@ -315,12 +295,11 @@
     //  > 0     Number of frames successfully transferred during this callback prior to first error.
     //  = 0     Count was zero.
     //  < 0     status_t error occurred prior to the first frame transfer during this callback.
-    virtual ssize_t readVia(readVia_t via, size_t total, void *user,
-                            int64_t readPTS, size_t block = 0);
+    virtual ssize_t readVia(readVia_t via, size_t total, void *user, size_t block = 0);
 
     // Invoked asynchronously by corresponding sink when a new timestamp is available.
     // Default implementation ignores the timestamp.
-    virtual void    onTimestamp(const AudioTimestamp& timestamp) { }
+    virtual void    onTimestamp(const ExtendedTimestamp& timestamp) { }
 
 protected:
     NBAIO_Source(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0)
@@ -328,7 +307,7 @@
     virtual ~NBAIO_Source() { }
 
     // Implementations are free to ignore these if they don't need them
-    size_t  mFramesRead;
+    int64_t  mFramesRead;
 };
 
 }   // namespace android
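
A minimal sketch (not part of the change) of a non-blocking read against the
narrowed signature, mapping WOULD_BLOCK to "zero frames for now":

    #include <media/nbaio/NBAIO.h>

    using namespace android;

    static ssize_t drainOnce(const sp<NBAIO_Source> &source, void *buffer, size_t maxFrames) {
        ssize_t actual = source->read(buffer, maxFrames);
        if (actual == (ssize_t) WOULD_BLOCK) {
            return 0;   // nothing available right now; the caller should retry later
        }
        return actual;  // frames transferred, or a negative status_t
    }
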
diff --git a/include/media/nbaio/Pipe.h b/include/media/nbaio/Pipe.h
index eba37bc..cc95ff7 100644
--- a/include/media/nbaio/Pipe.h
+++ b/include/media/nbaio/Pipe.h
@@ -45,9 +45,9 @@
 
     // NBAIO_Sink interface
 
-    //virtual size_t framesWritten() const;
-    //virtual size_t framesUnderrun() const;
-    //virtual size_t underruns() const;
+    //virtual int64_t framesWritten() const;
+    //virtual int64_t framesUnderrun() const;
+    //virtual int64_t underruns() const;
 
     // The write side of a pipe permits overruns; flow control is the caller's responsibility.
     // It doesn't return +infinity because that would guarantee an overrun.
diff --git a/include/media/nbaio/PipeReader.h b/include/media/nbaio/PipeReader.h
index 350e6ab..7c733ad 100644
--- a/include/media/nbaio/PipeReader.h
+++ b/include/media/nbaio/PipeReader.h
@@ -40,12 +40,12 @@
     // NBAIO_Source interface
 
     //virtual size_t framesRead() const;
-    virtual size_t framesOverrun() { return mFramesOverrun; }
-    virtual size_t overruns()  { return mOverruns; }
+    virtual int64_t framesOverrun() { return mFramesOverrun; }
+    virtual int64_t overruns()  { return mOverruns; }
 
     virtual ssize_t availableToRead();
 
-    virtual ssize_t read(void *buffer, size_t count, int64_t readPTS);
+    virtual ssize_t read(void *buffer, size_t count);
 
     // NBAIO_Source end
 
@@ -56,8 +56,8 @@
 private:
     Pipe&       mPipe;
     int32_t     mFront;         // follows behind mPipe.mRear
-    size_t      mFramesOverrun;
-    size_t      mOverruns;
+    int64_t     mFramesOverrun;
+    int64_t     mOverruns;
 };
 
 }   // namespace android
diff --git a/include/media/nbaio/SourceAudioBufferProvider.h b/include/media/nbaio/SourceAudioBufferProvider.h
index daf6bc3..ae49903 100644
--- a/include/media/nbaio/SourceAudioBufferProvider.h
+++ b/include/media/nbaio/SourceAudioBufferProvider.h
@@ -31,13 +31,13 @@
     virtual ~SourceAudioBufferProvider();
 
     // AudioBufferProvider interface
-    virtual status_t getNextBuffer(Buffer *buffer, int64_t pts);
+    virtual status_t getNextBuffer(Buffer *buffer);
     virtual void     releaseBuffer(Buffer *buffer);
 
     // ExtendedAudioBufferProvider interface
     virtual size_t   framesReady() const;
-    virtual size_t   framesReleased() const;
-    virtual void     onTimestamp(const AudioTimestamp& timestamp);
+    virtual int64_t  framesReleased() const;
+    virtual void     onTimestamp(const ExtendedTimestamp &timestamp);
 
 private:
     const sp<NBAIO_Source> mSource;     // the wrapped source
@@ -47,7 +47,7 @@
     size_t              mOffset;    // frame offset within mAllocated of valid data
     size_t              mRemaining; // frame count within mAllocated of valid data
     size_t              mGetCount;  // buffer.frameCount of the most recent getNextBuffer
-    uint32_t            mFramesReleased;    // counter of the total number of frames released
+    int64_t             mFramesReleased;    // counter of the total number of frames released
 };
 
 }   // namespace android
diff --git a/include/media/stagefright/AACWriter.h b/include/media/stagefright/AACWriter.h
index aa60a19..a1f63d7 100644
--- a/include/media/stagefright/AACWriter.h
+++ b/include/media/stagefright/AACWriter.h
@@ -31,7 +31,7 @@
 
     status_t initCheck() const;
 
-    virtual status_t addSource(const sp<MediaSource> &source);
+    virtual status_t addSource(const sp<IMediaSource> &source);
     virtual bool reachedEOS();
     virtual status_t start(MetaData *params = NULL);
     virtual status_t stop() { return reset(); }
@@ -48,7 +48,7 @@
 
     int   mFd;
     status_t mInitCheck;
-    sp<MediaSource> mSource;
+    sp<IMediaSource> mSource;
     bool mStarted;
     volatile bool mPaused;
     volatile bool mResumed;
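
A hedged sketch of feeding a writer, assuming the track was already obtained as
an sp<IMediaSource> (for example from an extractor) and that fd is an open
output file descriptor:

    #include <media/IMediaSource.h>
    #include <media/stagefright/AACWriter.h>

    using namespace android;

    static status_t writeAacTrack(const sp<IMediaSource> &track, int fd) {
        sp<AACWriter> writer = new AACWriter(fd);
        status_t err = writer->addSource(track);
        if (err != OK) {
            return err;
        }
        err = writer->start();
        if (err != OK) {
            return err;
        }
        // ... record until done, then:
        return writer->stop();
    }
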
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 8b5b862..4f2517c 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -25,7 +25,9 @@
 #include <media/stagefright/foundation/AHierarchicalStateMachine.h>
 #include <media/stagefright/CodecBase.h>
 #include <media/stagefright/FrameRenderTracker.h>
+#include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/SkipCutBuffer.h>
+#include <utils/NativeHandle.h>
 #include <OMX_Audio.h>
 
 #define TRACK_BUFFER_TIMING     0
@@ -34,7 +36,8 @@
 
 struct ABuffer;
 struct MemoryDealer;
-struct DescribeColorFormatParams;
+struct DescribeColorFormat2Params;
+struct DataConverter;
 
 struct ACodec : public AHierarchicalStateMachine, public CodecBase {
     ACodec();
@@ -50,6 +53,10 @@
     virtual void initiateStart();
     virtual void initiateShutdown(bool keepComponentAllocated = false);
 
+    virtual status_t queryCapabilities(
+            const AString &name, const AString &mime, bool isEncoder,
+            sp<MediaCodecInfo::Capabilities> *caps);
+
     virtual status_t setSurface(const sp<Surface> &surface);
 
     virtual void signalFlush();
@@ -68,15 +75,21 @@
         size_t countBuffers();
         IOMX::buffer_id bufferIDAt(size_t index) const;
         sp<ABuffer> bufferAt(size_t index) const;
+        sp<NativeHandle> handleAt(size_t index) const;
+        sp<RefBase> memRefAt(size_t index) const;
 
     private:
         friend struct ACodec;
 
         Vector<IOMX::buffer_id> mBufferIDs;
         Vector<sp<ABuffer> > mBuffers;
+        Vector<sp<NativeHandle> > mHandles;
+        Vector<sp<RefBase> > mMemRefs;
 
         PortDescription();
-        void addBuffer(IOMX::buffer_id id, const sp<ABuffer> &buffer);
+        void addBuffer(
+                IOMX::buffer_id id, const sp<ABuffer> &buffer,
+                const sp<NativeHandle> &handle, const sp<RefBase> &memRef);
 
         DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
     };
@@ -91,6 +104,14 @@
             int width, int height, int rate, int bitrate,
             OMX_VIDEO_AVCPROFILETYPE profile = OMX_VIDEO_AVCProfileBaseline);
 
+    // Quirk still supported, even though deprecated
+    enum Quirks {
+        kRequiresAllocateBufferOnInputPorts   = 1,
+        kRequiresAllocateBufferOnOutputPorts  = 2,
+    };
+
+    static status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]);
+
 protected:
     virtual ~ACodec();
 
@@ -169,8 +190,13 @@
         Status mStatus;
         unsigned mDequeuedAt;
 
-        sp<ABuffer> mData;
+        sp<ABuffer> mData;      // the client's buffer; if not using data conversion, this is the
+                                // codec buffer; otherwise, it is allocated separately
+        sp<RefBase> mMemRef;    // and a reference to the IMemory, so it does not go away
+        sp<ABuffer> mCodecData; // the codec's buffer
+        sp<RefBase> mCodecRef;  // and a reference to the IMemory
         sp<GraphicBuffer> mGraphicBuffer;
+        sp<NativeHandle> mNativeHandle;
         int mFenceFd;
         FrameRenderTracker::Info *mRenderInfo;
 
@@ -217,13 +243,21 @@
     uint32_t mFlags;
     uint32_t mQuirks;
     sp<IOMX> mOMX;
+    sp<IBinder> mNodeBinder;
     IOMX::node_id mNode;
     sp<MemoryDealer> mDealer[2];
 
+    bool mUsingNativeWindow;
     sp<ANativeWindow> mNativeWindow;
     int mNativeWindowUsageBits;
+    android_native_rect_t mLastNativeWindowCrop;
+    int32_t mLastNativeWindowDataSpace;
+    sp<AMessage> mConfigFormat;
     sp<AMessage> mInputFormat;
     sp<AMessage> mOutputFormat;
+
+    // Initial output format + configuration params that are reused as the base for all subsequent
+    // format updates. This will equal mOutputFormat until the first actual frame is received.
     sp<AMessage> mBaseOutputFormat;
 
     FrameRenderTracker mRenderTracker; // render information for buffers rendered by ACodec
@@ -233,12 +267,13 @@
 
     List<sp<AMessage> > mDeferredQueue;
 
-    bool mSentFormat;
+    sp<AMessage> mLastOutputFormat;
     bool mIsVideo;
     bool mIsEncoder;
     bool mFatalError;
     bool mShutdownInProgress;
     bool mExplicitShutdown;
+    bool mIsLegacyVP9Decoder;
 
     // If "mKeepComponentAllocated" we only transition back to Loaded state
     // and do not release the component instance.
@@ -256,6 +291,7 @@
     bool mLegacyAdaptiveExperiment;
     int32_t mMetadataBuffersToSubmit;
     size_t mNumUndequeuedBuffers;
+    sp<DataConverter> mConverter[2];
 
     int64_t mRepeatFrameDelayUs;
     int64_t mMaxPtsGapUs;
@@ -268,6 +304,9 @@
 
     bool mTunneled;
 
+    OMX_INDEXTYPE mDescribeColorAspectsIndex;
+    OMX_INDEXTYPE mDescribeHDRStaticInfoIndex;
+
     status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode);
     status_t allocateBuffersOnPort(OMX_U32 portIndex);
     status_t freeBuffersOnPort(OMX_U32 portIndex);
@@ -275,11 +314,12 @@
 
     status_t handleSetSurface(const sp<Surface> &surface);
     status_t setupNativeWindowSizeFormatAndUsage(
-            ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */);
+            ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */,
+            bool reconnect);
 
     status_t configureOutputBuffersFromNativeWindow(
             OMX_U32 *nBufferCount, OMX_U32 *nBufferSize,
-            OMX_U32 *nMinUndequeuedBuffers);
+            OMX_U32 *nMinUndequeuedBuffers, bool preregister);
     status_t allocateOutputMetadataBuffers();
     status_t submitOutputMetadataBuffer();
     void signalSubmitOutputMetadataBufferIfEOS_workaround();
@@ -301,6 +341,10 @@
             ssize_t *index = NULL);
 
     status_t setComponentRole(bool isEncoder, const char *mime);
+    static const char *getComponentRole(bool isEncoder, const char *mime);
+    static status_t setComponentRole(
+            const sp<IOMX> &omx, IOMX::node_id node, const char *role);
+
     status_t configureCodec(const char *mime, const sp<AMessage> &msg);
 
     status_t configureTunneledVideoPlayback(int32_t audioHwSync,
@@ -315,16 +359,90 @@
     status_t setSupportedOutputFormat(bool getLegacyFlexibleFormat);
 
     status_t setupVideoDecoder(
-            const char *mime, const sp<AMessage> &msg, bool usingNativeBuffers);
+            const char *mime, const sp<AMessage> &msg, bool usingNativeBuffers, bool haveSwRenderer,
+            sp<AMessage> &outputformat);
 
     status_t setupVideoEncoder(
-            const char *mime, const sp<AMessage> &msg);
+            const char *mime, const sp<AMessage> &msg,
+            sp<AMessage> &outputformat, sp<AMessage> &inputformat);
 
     status_t setVideoFormatOnPort(
             OMX_U32 portIndex,
             int32_t width, int32_t height,
             OMX_VIDEO_CODINGTYPE compressionFormat, float frameRate = -1.0);
 
+    // gets index or sets it to 0 on error. Returns error from codec.
+    status_t initDescribeColorAspectsIndex();
+
+    // sets |params|. If |readBack| is true, it reads the values back afterwards if the set
+    // succeeded. Returns the codec error.
+    status_t setCodecColorAspects(DescribeColorAspectsParams &params, bool readBack = false);
+
+    // gets |params|; returns the codec error. |params| should not change on error.
+    status_t getCodecColorAspects(DescribeColorAspectsParams &params);
+
+    // gets dataspace guidance from codec and platform. |params| should be set up with the color
+    // aspects to use. If |tryCodec| is true, the codec is queried first. If it succeeds, we
+    // return OK. Otherwise, we fall back to the platform guidance and return the codec error;
+    // though, we return OK if the codec failed with UNSUPPORTED, as codec guidance is optional.
+    status_t getDataSpace(
+            DescribeColorAspectsParams &params, android_dataspace *dataSpace /* nonnull */,
+            bool tryCodec);
+
+    // sets color aspects for the decoder for certain |width/height| based on |configFormat|, and
+    // sets the resulting color config into |outputFormat|. If |usingNativeWindow| is true, we use
+    // video defaults if config is unspecified. Returns error from the codec.
+    status_t setColorAspectsForVideoDecoder(
+            int32_t width, int32_t height, bool usingNativeWindow,
+            const sp<AMessage> &configFormat, sp<AMessage> &outputFormat);
+
+    // gets color aspects for the decoder for certain |width/height| based on |configFormat|, and
+    // sets the resulting color config into |outputFormat|. If |dataSpace| is non-null, it requests
+    // dataspace guidance from the codec and platform and sets it into |dataSpace|. Returns the
+    // error from the codec.
+    status_t getColorAspectsAndDataSpaceForVideoDecoder(
+            int32_t width, int32_t height, const sp<AMessage> &configFormat,
+            sp<AMessage> &outputFormat, android_dataspace *dataSpace);
+
+    // sets color aspects for the video encoder assuming bytebuffer mode for certain |configFormat|
+    // and sets the resulting color config into |outputFormat|. For mediarecorder, it also sets
+    // the dataspace into |inputFormat|. Returns the error from the codec.
+    status_t setColorAspectsForVideoEncoder(
+            const sp<AMessage> &configFormat,
+            sp<AMessage> &outputFormat, sp<AMessage> &inputFormat);
+
+    // sets color aspects for the video encoder in surface mode. This basically sets the default
+    // video values for unspecified aspects and sets the dataspace to use in the input format.
+    // Also sets the dataspace into |dataSpace|.
+    // Returns any codec errors during this configuration, except for optional steps.
+    status_t setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(
+            android_dataspace *dataSpace /* nonnull */);
+
+    // gets color aspects for the video encoder input port and sets them into the |format|.
+    // Returns any codec errors.
+    status_t getInputColorAspectsForVideoEncoder(sp<AMessage> &format);
+
+    // updates the encoder output format with |aspects| defaulting to |dataSpace| for
+    // unspecified values.
+    void onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects);
+
+    // gets index or sets it to 0 on error. Returns error from codec.
+    status_t initDescribeHDRStaticInfoIndex();
+
+    // sets HDR static metadata for the video encoder/decoder based on |configFormat|, and
+    // sets resulting HDRStaticInfo config into |outputFormat|. Returns error from the codec.
+    status_t setHDRStaticInfoForVideoCodec(
+            OMX_U32 portIndex, const sp<AMessage> &configFormat, sp<AMessage> &outputFormat);
+
+    // sets |params|. Returns the codec error.
+    status_t setHDRStaticInfo(const DescribeHDRStaticInfoParams &params);
+
+    // gets |params|. Returns the codec error.
+    status_t getHDRStaticInfo(DescribeHDRStaticInfoParams &params);
+
+    // gets HDR static information for the video encoder/decoder port and sets it into |format|.
+    status_t getHDRStaticInfoForVideoCodec(OMX_U32 portIndex, sp<AMessage> &format);
+
     typedef struct drcParams {
         int32_t drcCut;
         int32_t drcBoost;
@@ -354,10 +472,13 @@
             bool encoder, int32_t numChannels, int32_t sampleRate, int32_t compressionLevel);
 
     status_t setupRawAudioFormat(
-            OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels);
+            OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels,
+            AudioEncoding encoding = kAudioEncodingPcm16bit);
 
     status_t setPriority(int32_t priority);
     status_t setOperatingRate(float rateFloat, bool isVideo);
+    status_t getIntraRefreshPeriod(uint32_t *intraRefreshPeriod);
+    status_t setIntraRefreshPeriod(uint32_t intraRefreshPeriod, bool inConfigure);
 
     status_t setMinBufferSize(OMX_U32 portIndex, size_t size);
 
@@ -407,17 +528,23 @@
     void notifyOfRenderedFrames(
             bool dropIncomplete = false, FrameRenderTracker::Info *until = NULL);
 
-    void sendFormatChange(const sp<AMessage> &reply);
+    // Pass |expectedFormat| to print a warning if the format differs from it.
+    // Using sp<> instead of const sp<>& because expectedFormat is likely the current mOutputFormat
+    // which will get updated inside.
+    void onOutputFormatChanged(sp<const AMessage> expectedFormat = NULL);
+    void addKeyFormatChangesToRenderBufferNotification(sp<AMessage> &notify);
+    void sendFormatChange();
+
     status_t getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify);
 
     void signalError(
             OMX_ERRORTYPE error = OMX_ErrorUndefined,
             status_t internalError = UNKNOWN_ERROR);
 
-    static bool describeDefaultColorFormat(DescribeColorFormatParams &describeParams);
+    static bool describeDefaultColorFormat(DescribeColorFormat2Params &describeParams);
     static bool describeColorFormat(
         const sp<IOMX> &omx, IOMX::node_id node,
-        DescribeColorFormatParams &describeParams);
+        DescribeColorFormat2Params &describeParams);
 
     status_t requestIDRFrame();
     status_t setParameters(const sp<AMessage> &params);
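
A hedged sketch of the getDataSpace() fallback order documented above; the two
query helpers are hypothetical stand-ins, not ACodec methods:

    #include <media/stagefright/MediaErrors.h>
    #include <media/hardware/HardwareAPI.h>   // DescribeColorAspectsParams

    using namespace android;

    // Hypothetical helpers standing in for the codec and platform queries.
    static status_t queryCodecDataSpace(
            DescribeColorAspectsParams &params, android_dataspace *dataSpace);
    static android_dataspace queryPlatformDataSpace(
            const DescribeColorAspectsParams &params);

    static status_t getDataSpaceSketch(
            DescribeColorAspectsParams &params, android_dataspace *dataSpace, bool tryCodec) {
        status_t err = OK;
        if (tryCodec) {
            err = queryCodecDataSpace(params, dataSpace);
            if (err == OK) {
                return OK;      // codec guidance wins when available
            }
        }
        *dataSpace = queryPlatformDataSpace(params);
        // Codec guidance is optional, so UNSUPPORTED is not treated as a failure.
        return (err == ERROR_UNSUPPORTED) ? OK : err;
    }
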
diff --git a/include/media/stagefright/AMRWriter.h b/include/media/stagefright/AMRWriter.h
index b38be55..fbbdf2e 100644
--- a/include/media/stagefright/AMRWriter.h
+++ b/include/media/stagefright/AMRWriter.h
@@ -20,12 +20,12 @@
 
 #include <stdio.h>
 
+#include <media/IMediaSource.h>
 #include <media/stagefright/MediaWriter.h>
 #include <utils/threads.h>
 
 namespace android {
 
-struct MediaSource;
 class MetaData;
 
 struct AMRWriter : public MediaWriter {
@@ -33,7 +33,7 @@
 
     status_t initCheck() const;
 
-    virtual status_t addSource(const sp<MediaSource> &source);
+    virtual status_t addSource(const sp<IMediaSource> &source);
     virtual bool reachedEOS();
     virtual status_t start(MetaData *params = NULL);
     virtual status_t stop() { return reset(); }
@@ -45,7 +45,7 @@
 private:
     int   mFd;
     status_t mInitCheck;
-    sp<MediaSource> mSource;
+    sp<IMediaSource> mSource;
     bool mStarted;
     volatile bool mPaused;
     volatile bool mResumed;
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index e0cd965..f7499b6 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -18,9 +18,9 @@
 
 #define AUDIO_PLAYER_H_
 
+#include <media/IMediaSource.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/TimeSource.h>
 #include <utils/threads.h>
 
 namespace android {
@@ -28,9 +28,8 @@
 struct AudioPlaybackRate;
 class AudioTrack;
 struct AwesomePlayer;
-class MediaSource;
 
-class AudioPlayer : public TimeSource {
+class AudioPlayer {
 public:
     enum {
         REACHED_EOS,
@@ -46,29 +45,18 @@
     };
 
     AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink,
-                uint32_t flags = 0,
-                AwesomePlayer *audioObserver = NULL);
+                uint32_t flags = 0);
 
     virtual ~AudioPlayer();
 
     // Caller retains ownership of "source".
-    void setSource(const sp<MediaSource> &source);
-
-    // Return time in us.
-    virtual int64_t getRealTimeUs();
+    void setSource(const sp<IMediaSource> &source);
 
     status_t start(bool sourceAlreadyStarted = false);
 
     void pause(bool playPendingSamples = false);
     status_t resume();
 
-    // Returns the timestamp of the last buffer played (in us).
-    int64_t getMediaTimeUs();
-
-    // Returns true iff a mapping is established, i.e. the AudioPlayer
-    // has played at least one frame of audio.
-    bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
-
     status_t seekTo(int64_t time_us);
 
     bool isSeeking();
@@ -77,11 +65,8 @@
     status_t setPlaybackRate(const AudioPlaybackRate &rate);
     status_t getPlaybackRate(AudioPlaybackRate *rate /* nonnull */);
 
-    void notifyAudioEOS();
-
 private:
-    friend class VideoEditorAudioPlayer;
-    sp<MediaSource> mSource;
+    sp<IMediaSource> mSource;
     sp<AudioTrack> mAudioTrack;
 
     MediaBuffer *mInputBuffer;
@@ -109,8 +94,6 @@
     MediaBuffer *mFirstBuffer;
 
     sp<MediaPlayerBase::AudioSink> mAudioSink;
-    AwesomePlayer *mObserver;
-    int64_t mPinnedTimeUs;
 
     bool mPlaying;
     int64_t mStartPosUs;
@@ -126,11 +109,8 @@
 
     size_t fillBuffer(void *data, size_t size);
 
-    int64_t getRealTimeUsLocked() const;
-
     void reset();
 
-    uint32_t getNumFramesPendingPlayout() const;
     int64_t getOutputPlayPositionUs_l();
 
     bool allowDeepBuffering() const { return (mCreateFlags & ALLOW_DEEP_BUFFERING) != 0; }
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 3074910..8fc410d 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -38,7 +38,9 @@
             const String16 &opPackageName,
             uint32_t sampleRate,
             uint32_t channels,
-            uint32_t outSampleRate = 0);
+            uint32_t outSampleRate = 0,
+            uid_t uid = -1,
+            pid_t pid = -1);
 
     status_t initCheck() const;
 
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 6c938a5..c2e75a6 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -20,12 +20,15 @@
 
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaSource.h>
-#include <camera/ICamera.h>
+#include <camera/android/hardware/ICamera.h>
+#include <camera/ICameraRecordingProxy.h>
 #include <camera/ICameraRecordingProxyListener.h>
 #include <camera/CameraParameters.h>
+#include <gui/BufferItemConsumer.h>
 #include <utils/List.h>
 #include <utils/RefBase.h>
 #include <utils/String16.h>
+#include <MetadataBufferType.h>
 
 namespace android {
 
@@ -60,6 +63,8 @@
      *          permissions checking.
      * @param clientUid the UID of the camera-using application if camera is
      *          NULL; otherwise ignored. Used for permissions checking.
+     * @param clientPid the PID of the camera-using application if camera is
+     *          NULL; otherwise ignored. Used for permissions checking.
      * @param videoSize the dimension (in pixels) of the video frame
      * @param frameRate the target frames per second
      * @param surface the preview surface for display where preview
@@ -75,11 +80,12 @@
      *
      * @return NULL on error.
      */
-    static CameraSource *CreateFromCamera(const sp<ICamera> &camera,
+    static CameraSource *CreateFromCamera(const sp<hardware::ICamera> &camera,
                                           const sp<ICameraRecordingProxy> &proxy,
                                           int32_t cameraId,
                                           const String16& clientName,
                                           uid_t clientUid,
+                                          pid_t clientPid,
                                           Size videoSize,
                                           int32_t frameRate,
                                           const sp<IGraphicBufferProducer>& surface,
@@ -113,25 +119,55 @@
      * Tell whether this camera source stores meta data or real YUV
      * frame data in video buffers.
      *
-     * @return true if meta data is stored in the video
-     *      buffers; false if real YUV data is stored in
+     * @return a valid type if meta data is stored in the video
+     *      buffers; kMetadataBufferTypeInvalid if real YUV data is stored in
      *      the video buffers.
      */
-    bool isMetaDataStoredInVideoBuffers() const;
+    MetadataBufferType metaDataStoredInVideoBuffers() const;
 
     virtual void signalBufferReturned(MediaBuffer* buffer);
 
 protected:
+
+    /**
+     * The class for listening to BnCameraRecordingProxyListener. This is used to receive video
+     * buffers in VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV and VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA
+     * mode. When a frame is available, CameraSource::dataCallbackTimestamp() will be called.
+     */
     class ProxyListener: public BnCameraRecordingProxyListener {
     public:
         ProxyListener(const sp<CameraSource>& source);
         virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
                 const sp<IMemory> &data);
+        virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
+                native_handle_t* handle);
 
     private:
         sp<CameraSource> mSource;
     };
 
+    /**
+     * The class for listening to BufferQueue's onFrameAvailable. This is used to receive video
+     * buffers in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode. When a frame is available,
+     * CameraSource::processBufferQueueFrame() will be called.
+     */
+    class BufferQueueListener : public Thread, public BufferItemConsumer::FrameAvailableListener {
+    public:
+        BufferQueueListener(const sp<BufferItemConsumer> &consumer,
+                const sp<CameraSource> &cameraSource);
+        virtual void onFrameAvailable(const BufferItem& item);
+        virtual bool threadLoop();
+    private:
+        static const nsecs_t kFrameAvailableTimeout = 50000000; // 50ms
+
+        sp<BufferItemConsumer> mConsumer;
+        sp<CameraSource> mCameraSource;
+
+        Mutex mLock;
+        Condition mFrameAvailableSignal;
+        bool mFrameAvailable;
+    };
+
     // isBinderAlive needs linkToDeath to work.
     class DeathNotifier: public IBinder::DeathRecipient {
     public:
@@ -168,14 +204,15 @@
     // Time between capture of two frames.
     int64_t mTimeBetweenFrameCaptureUs;
 
-    CameraSource(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
-                 int32_t cameraId, const String16& clientName, uid_t clientUid,
+    CameraSource(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
+                 int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid,
                  Size videoSize, int32_t frameRate,
                  const sp<IGraphicBufferProducer>& surface,
                  bool storeMetaDataInVideoBuffers);
 
     virtual status_t startCameraRecording();
     virtual void releaseRecordingFrame(const sp<IMemory>& frame);
+    virtual void releaseRecordingFrameHandle(native_handle_t* handle);
 
     // Returns true if need to skip the current frame.
     // Called from dataCallbackTimestamp.
@@ -187,6 +224,12 @@
     virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
             const sp<IMemory> &data);
 
+    virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
+            native_handle_t* handle);
+
+    // Process a buffer item received in BufferQueueListener.
+    virtual void processBufferQueueFrame(BufferItem& buffer);
+
     void releaseCamera();
 
 private:
@@ -204,26 +247,53 @@
     int32_t mNumGlitches;
     int64_t mGlitchDurationThresholdUs;
     bool mCollectStats;
-    bool mIsMetaDataStoredInVideoBuffers;
+
+    // The mode in which video buffers are received from the camera. One of VIDEO_BUFFER_MODE_*.
+    int32_t mVideoBufferMode;
+
+    static const uint32_t kDefaultVideoBufferCount = 32;
+
+    /**
+     * The following variables are used in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
+     */
+    static const size_t kConsumerBufferCount = 8;
+    static const nsecs_t kMemoryBaseAvailableTimeoutNs = 200000000; // 200ms
+    // Consumer and producer of the buffer queue between this class and camera.
+    sp<BufferItemConsumer> mVideoBufferConsumer;
+    sp<IGraphicBufferProducer> mVideoBufferProducer;
+    // Memory used to send the buffers to encoder, where sp<IMemory> stores VideoNativeMetadata.
+    sp<IMemoryHeap> mMemoryHeapBase;
+    List<sp<IMemory>> mMemoryBases;
+    // The condition that will be signaled when there is an entry available in mMemoryBases.
+    Condition mMemoryBaseAvailableCond;
+    // A mapping from ANativeWindowBuffer sent to encoder to BufferItem received from camera.
+    // This is protected by mLock.
+    KeyedVector<ANativeWindowBuffer*, BufferItem> mReceivedBufferItemMap;
+    sp<BufferQueueListener> mBufferQueueListener;
 
     void releaseQueuedFrames();
     void releaseOneRecordingFrame(const sp<IMemory>& frame);
+    void createVideoBufferMemoryHeap(size_t size, uint32_t bufferCount);
 
-
-    status_t init(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
-                  int32_t cameraId, const String16& clientName, uid_t clientUid,
+    status_t init(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
+                  int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid,
                   Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers);
 
     status_t initWithCameraAccess(
-                  const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
-                  int32_t cameraId, const String16& clientName, uid_t clientUid,
+                  const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
+                  int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid,
                   Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers);
 
-    status_t isCameraAvailable(const sp<ICamera>& camera,
+    // Initialize the buffer queue used in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
+    status_t initBufferQueue(uint32_t width, uint32_t height, uint32_t format,
+                  android_dataspace dataSpace, uint32_t bufferCount);
+
+    status_t isCameraAvailable(const sp<hardware::ICamera>& camera,
                                const sp<ICameraRecordingProxy>& proxy,
                                int32_t cameraId,
                                const String16& clientName,
-                               uid_t clientUid);
+                               uid_t clientUid,
+                               pid_t clientPid);
 
     status_t isCameraColorFormatSupported(const CameraParameters& params);
     status_t configureCamera(CameraParameters* params,
@@ -236,8 +306,9 @@
     status_t checkFrameRate(const CameraParameters& params,
                     int32_t frameRate);
 
-    static void adjustIncomingANWBuffer(IMemory* data);
-    static void adjustOutgoingANWBuffer(IMemory* data);
+    // Check if this frame should be skipped based on the frame's timestamp in microseconds.
+    // mLock must be locked before calling this function.
+    bool shouldSkipFrameLocked(int64_t timestampUs);
 
     void stopCameraRecording();
     status_t reset();
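
The VIDEO_BUFFER_MODE_BUFFER_QUEUE path above replaces binder data callbacks with a BufferItemConsumer feeding a dedicated thread. A minimal sketch of how BufferQueueListener's pieces might fit together, assuming only the members declared above (the actual CameraSource.cpp implementation may differ):

    void BufferQueueListener::onFrameAvailable(const BufferItem& /*item*/) {
        Mutex::Autolock l(mLock);
        mFrameAvailable = true;
        mFrameAvailableSignal.signal();  // wake up threadLoop()
    }

    bool BufferQueueListener::threadLoop() {
        {
            Mutex::Autolock l(mLock);
            while (!mFrameAvailable) {
                // Bounded wait so the thread can notice exit requests promptly.
                if (mFrameAvailableSignal.waitRelative(mLock, kFrameAvailableTimeout) ==
                        TIMED_OUT) {
                    return true;  // no frame yet; run the loop again
                }
            }
            mFrameAvailable = false;
        }
        BufferItem item;
        if (mConsumer->acquireBuffer(&item, 0 /* presentWhen */) == OK) {
            mCameraSource->processBufferQueueFrame(item);
        }
        return true;
    }
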
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index 34213be..871c1d9 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -26,18 +26,22 @@
 
 namespace android {
 
+namespace hardware {
 class ICamera;
+}
+
 class IMemory;
 class Camera;
 
 class CameraSourceTimeLapse : public CameraSource {
 public:
     static CameraSourceTimeLapse *CreateFromCamera(
-        const sp<ICamera> &camera,
+        const sp<hardware::ICamera> &camera,
         const sp<ICameraRecordingProxy> &proxy,
         int32_t cameraId,
         const String16& clientName,
         uid_t clientUid,
+        pid_t clientPid,
         Size videoSize,
         int32_t videoFrameRate,
         const sp<IGraphicBufferProducer>& surface,
@@ -109,11 +113,12 @@
     status_t mLastReadStatus;
 
     CameraSourceTimeLapse(
-        const sp<ICamera> &camera,
+        const sp<hardware::ICamera> &camera,
         const sp<ICameraRecordingProxy> &proxy,
         int32_t cameraId,
         const String16& clientName,
         uid_t clientUid,
+        pid_t clientPid,
         Size videoSize,
         int32_t videoFrameRate,
         const sp<IGraphicBufferProducer>& surface,
@@ -135,9 +140,23 @@
     // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
     // timestamp and set mSkipCurrentFrame.
     // Then it calls the base CameraSource::dataCallbackTimestamp()
+    // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV and
+    // VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA modes.
     virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
             const sp<IMemory> &data);
 
+    // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
+    // timestamp and set mSkipCurrentFrame.
+    // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp()
+    // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode when
+    // the metadata is VideoNativeHandleMetadata.
+    virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
+            native_handle_t* handle);
+
+    // Process a buffer item received in CameraSource::BufferQueueListener.
+    // This will be called in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
+    virtual void processBufferQueueFrame(BufferItem& buffer);
+
     // Convenience function to fill mLastReadBufferCopy from the just read
     // buffer.
     void fillLastReadBufferCopy(MediaBuffer& sourceBuffer);
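
The timestamp modification mentioned above is what turns sparsely captured frames into normal-rate playback. As a hypothetical illustration (the real logic lives in skipFrameAndModifyTimeStamp() and also treats the first frame specially), the N-th retained frame of a 30 fps time-lapse is re-stamped to N/30 seconds, no matter how much wall-clock time separated the captures:

    int64_t timeLapseTimestampUs(int64_t retainedFrameIndex, int32_t videoFrameRate) {
        return retainedFrameIndex * 1000000LL / videoFrameRate;
    }
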
diff --git a/include/media/stagefright/ClockEstimator.h b/include/media/stagefright/ClockEstimator.h
deleted file mode 100644
index 1455b7f..0000000
--- a/include/media/stagefright/ClockEstimator.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
-**
-** Copyright 2014, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef CLOCK_ESTIMATOR_H_
-
-#define CLOCK_ESTIMATOR_H_
-
-#include "foundation/ABase.h"
-#include <utils/RefBase.h>
-#include <utils/Vector.h>
-
-namespace android {
-// ---------------------------------------------------------------------------
-
-struct ClockEstimator : RefBase {
-    virtual double estimate(double x, double y) = 0;
-    virtual void reset() = 0;
-};
-
-struct WindowedLinearFitEstimator : ClockEstimator {
-    struct LinearFit {
-        /**
-         * Fit y = a * x + b, where each input has a weight
-         */
-        double mX;  // sum(w_i * x_i)
-        double mXX; // sum(w_i * x_i^2)
-        double mY;  // sum(w_i * y_i)
-        double mYY; // sum(w_i * y_i^2)
-        double mXY; // sum(w_i * x_i * y_i)
-        double mW;  // sum(w_i)
-
-        LinearFit();
-        void reset();
-        void combine(const LinearFit &lf);
-        void add(double x, double y, double w);
-        void scale(double w);
-        double interpolate(double x);
-        double size() const;
-
-        DISALLOW_EVIL_CONSTRUCTORS(LinearFit);
-    };
-
-    /**
-     * Estimator for f(x) = y' where input y' is noisy, but
-     * theoretically linear:
-     *
-     *      y' =~ y = a * x + b
-     *
-     * It uses linear fit regression over a tapering rolling window
-     * to get an estimate for y (from the current and past inputs
-     * (x, y')).
-     *
-     *     ____________
-     *    /|          |\
-     *   / |          | \
-     *  /  |          |  \   <--- new data (x, y')
-     * /   |   main   |   \
-     * <--><----------><-->
-     * tail            head
-     *
-     * weight is 1 under the main window, tapers exponentially by
-     * the factors given in the head and the tail.
-     *
-     * Assuming that x and y' are monotonic, that x is somewhat
-     * evenly sampled, and that a =~ 1, the estimated y is also
-     * going to be monotonic.
-     */
-    WindowedLinearFitEstimator(
-            size_t headLength = 5, double headFactor = 0.5,
-            size_t mainLength = 0, double tailFactor = 0.99);
-
-    virtual void reset();
-
-    // add a new sample (x -> y') and return an estimated value for the true y
-    virtual double estimate(double x, double y);
-
-private:
-    Vector<double> mXHistory; // circular buffer
-    Vector<double> mYHistory; // circular buffer
-    LinearFit mHead;
-    LinearFit mMain;
-    LinearFit mTail;
-    double mHeadFactorInv;
-    double mTailFactor;
-    double mFirstWeight;
-    size_t mHistoryLength;
-    size_t mHeadLength;
-    size_t mNumSamples;
-    size_t mSampleIx;
-
-    DISALLOW_EVIL_CONSTRUCTORS(WindowedLinearFitEstimator);
-};
-
-}; // namespace android
-
-#endif
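
For reference, the deleted WindowedLinearFitEstimator kept exactly the sums needed to solve the weighted least-squares normal equations a*mXX + b*mX = mXY and a*mX + b*mW = mY. A sketch of how LinearFit::interpolate() can be derived from those sums (the removed implementation may have handled degenerate cases differently):

    double LinearFit::interpolate(double x) {
        double div = mW * mXX - mX * mX;
        if (div == 0) {
            return x;  // no spread in x; fall back to identity
        }
        double a = (mW * mXY - mX * mY) / div;
        double b = (mXX * mY - mX * mXY) / div;
        return a * x + b;
    }
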
diff --git a/include/media/stagefright/CodecBase.h b/include/media/stagefright/CodecBase.h
index bb36052..be2835d 100644
--- a/include/media/stagefright/CodecBase.h
+++ b/include/media/stagefright/CodecBase.h
@@ -19,16 +19,25 @@
 #define CODEC_BASE_H_
 
 #include <stdint.h>
-#include <media/IOMX.h>
 
+#define STRINGIFY_ENUMS
+
+#include <media/IOMX.h>
+#include <media/MediaCodecInfo.h>
 #include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/hardware/HardwareAPI.h>
+
+#include <utils/NativeHandle.h>
+
+#include <system/graphics.h>
 
 namespace android {
 
 struct ABuffer;
 struct PersistentSurface;
 
-struct CodecBase : public AHandler {
+struct CodecBase : public AHandler, /* static */ ColorUtils {
     enum {
         kWhatFillThisBuffer      = 'fill',
         kWhatDrainThisBuffer     = 'drai',
@@ -46,6 +55,10 @@
         kWhatOutputFramesRendered = 'outR',
     };
 
+    enum {
+        kMaxCodecBufferSize = 8192 * 4096 * 4, // 8K RGBA
+    };
+
     virtual void setNotificationMessage(const sp<AMessage> &msg) = 0;
 
     virtual void initiateAllocateComponent(const sp<AMessage> &msg) = 0;
@@ -59,6 +72,10 @@
     // require an explicit message handler
     virtual void onMessageReceived(const sp<AMessage> &msg) = 0;
 
+    virtual status_t queryCapabilities(
+            const AString &name, const AString &mime, bool isEncoder,
+            sp<MediaCodecInfo::Capabilities> *caps /* nonnull */) { return INVALID_OPERATION; }
+
     virtual status_t setSurface(const sp<Surface> &surface) { return INVALID_OPERATION; }
 
     virtual void signalFlush() = 0;
@@ -72,6 +89,8 @@
         virtual size_t countBuffers() = 0;
         virtual IOMX::buffer_id bufferIDAt(size_t index) const = 0;
         virtual sp<ABuffer> bufferAt(size_t index) const = 0;
+        virtual sp<NativeHandle> handleAt(size_t index) const { return NULL; }
+        virtual sp<RefBase> memRefAt(size_t index) const { return NULL; }
 
     protected:
         PortDescription();
@@ -81,6 +100,10 @@
         DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
     };
 
+    /*
+     * Codec-related defines
+     */
+
 protected:
     CodecBase();
     virtual ~CodecBase();
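
The kMaxCodecBufferSize cap added above works out to 8192 × 4096 pixels × 4 bytes per pixel = 134,217,728 bytes (128 MiB), i.e. one uncompressed 8K RGBA frame, matching its inline comment.
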
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 47c5c34..0254545 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -92,6 +92,10 @@
         return 0;
     }
 
+    virtual String8 toString() {
+        return String8("<unspecified>");
+    }
+
     virtual status_t reconnectAtOffset(off64_t offset) {
         return ERROR_UNSUPPORTED;
     }
@@ -121,6 +125,8 @@
 
     virtual String8 getMIMEType() const;
 
+    virtual void close() {}
+
 protected:
     virtual ~DataSource() {}
 
diff --git a/include/media/stagefright/FileSource.h b/include/media/stagefright/FileSource.h
index a981d1c..b6349e0 100644
--- a/include/media/stagefright/FileSource.h
+++ b/include/media/stagefright/FileSource.h
@@ -43,6 +43,10 @@
 
     virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
 
+    virtual String8 toString() {
+        return mName;
+    }
+
 protected:
     virtual ~FileSource();
 
@@ -51,12 +55,13 @@
     int64_t mOffset;
     int64_t mLength;
     Mutex mLock;
+    String8 mName;
 
     /*for DRM*/
     sp<DecryptHandle> mDecryptHandle;
     DrmManagerClient *mDrmManagerClient;
     int64_t mDrmBufOffset;
-    size_t mDrmBufSize;
+    ssize_t mDrmBufSize;
     unsigned char *mDrmBuf;
 
     ssize_t readAtDRM(off64_t offset, void *data, size_t size);
diff --git a/include/media/stagefright/MPEG2TSWriter.h b/include/media/stagefright/MPEG2TSWriter.h
index 3d7960b..4516fb6 100644
--- a/include/media/stagefright/MPEG2TSWriter.h
+++ b/include/media/stagefright/MPEG2TSWriter.h
@@ -34,7 +34,7 @@
             void *cookie,
             ssize_t (*write)(void *cookie, const void *data, size_t size));
 
-    virtual status_t addSource(const sp<MediaSource> &source);
+    virtual status_t addSource(const sp<IMediaSource> &source);
     virtual status_t start(MetaData *param = NULL);
     virtual status_t stop() { return reset(); }
     virtual status_t pause();
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index a195fe8..a6901a8 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -20,6 +20,7 @@
 
 #include <stdio.h>
 
+#include <media/IMediaSource.h>
 #include <media/stagefright/MediaWriter.h>
 #include <utils/List.h>
 #include <utils/threads.h>
@@ -28,7 +29,6 @@
 
 class AMessage;
 class MediaBuffer;
-class MediaSource;
 class MetaData;
 
 class MPEG4Writer : public MediaWriter {
@@ -39,7 +39,7 @@
     // 1. No more than 2 tracks can be added
     // 2. Only video or audio source can be added
     // 3. No more than one video and/or one audio source can be added.
-    virtual status_t addSource(const sp<MediaSource> &source);
+    virtual status_t addSource(const sp<IMediaSource> &source);
 
     // Returns INVALID_OPERATION if there is no source or track.
     virtual status_t start(MetaData *param = NULL);
diff --git a/include/media/stagefright/MediaBuffer.h b/include/media/stagefright/MediaBuffer.h
index c8a50e8..18b80e3 100644
--- a/include/media/stagefright/MediaBuffer.h
+++ b/include/media/stagefright/MediaBuffer.h
@@ -22,6 +22,7 @@
 
 #include <pthread.h>
 
+#include <binder/MemoryDealer.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 
@@ -47,6 +48,9 @@
 
 class MediaBuffer : public MediaBufferBase {
 public:
+    // allocations larger than or equal to this will use shared memory.
+    static const size_t kSharedMemThreshold = 64 * 1024;
+
     // The underlying data remains the responsibility of the caller!
     MediaBuffer(void *data, size_t size);
 
@@ -93,6 +97,8 @@
 private:
     friend class MediaBufferGroup;
     friend class OMXDecoder;
+    friend class BnMediaSource;
+    friend class BpMediaSource;
 
     // For use by OMXDecoder, reference count must be 1, drop reference
     // count to 0 without signalling the observer.
@@ -118,6 +124,7 @@
 
     MediaBuffer(const MediaBuffer &);
     MediaBuffer &operator=(const MediaBuffer &);
+    sp<IMemory> mMemory;
 };
 
 }  // namespace android
diff --git a/include/media/stagefright/MediaBufferGroup.h b/include/media/stagefright/MediaBufferGroup.h
index a006f7f..7ca3fa1 100644
--- a/include/media/stagefright/MediaBufferGroup.h
+++ b/include/media/stagefright/MediaBufferGroup.h
@@ -39,7 +39,11 @@
     // The returned buffer will have a reference count of 1.
     // If nonBlocking is true and a buffer is not immediately available,
     // buffer is set to NULL and it returns WOULD_BLOCK.
-    status_t acquire_buffer(MediaBuffer **buffer, bool nonBlocking = false);
+    // If requestedSize is 0, any free MediaBuffer will be returned.
+    // If requestedSize is > 0, the returned MediaBuffer should have a
+    // buffer size of at least requestedSize.
+    status_t acquire_buffer(
+            MediaBuffer **buffer, bool nonBlocking = false, size_t requestedSize = 0);
 
 protected:
     virtual void signalBufferReturned(MediaBuffer *buffer);
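
A usage sketch for the extended acquire_buffer(), with illustrative buffer sizes; when requestedSize is non-zero, only a free buffer at least that large satisfies the request:

    MediaBufferGroup group;
    group.add_buffer(new MediaBuffer(16 * 1024));
    group.add_buffer(new MediaBuffer(64 * 1024));

    MediaBuffer *buffer = NULL;
    // Blocks until the 64 KiB buffer is free; the 16 KiB one can never match.
    status_t err = group.acquire_buffer(&buffer, false /* nonBlocking */, 32 * 1024);
    if (err == OK) {
        // ... fill and use the buffer ...
        buffer->release();  // hands it back to the group
    }
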
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index cdfa159..fe579b7 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -20,6 +20,7 @@
 
 #include <gui/IGraphicBufferProducer.h>
 #include <media/hardware/CryptoAPI.h>
+#include <media/MediaCodecInfo.h>
 #include <media/MediaResource.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/FrameRenderTracker.h>
@@ -64,15 +65,20 @@
     static const pid_t kNoPid = -1;
 
     static sp<MediaCodec> CreateByType(
-            const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err = NULL,
+            const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err = NULL,
             pid_t pid = kNoPid);
 
     static sp<MediaCodec> CreateByComponentName(
-            const sp<ALooper> &looper, const char *name, status_t *err = NULL,
+            const sp<ALooper> &looper, const AString &name, status_t *err = NULL,
             pid_t pid = kNoPid);
 
     static sp<PersistentSurface> CreatePersistentInputSurface();
 
+    // utility method to query capabilities
+    static status_t QueryCapabilities(
+            const AString &name, const AString &mime, bool isEncoder,
+            sp<MediaCodecInfo::Capabilities> *caps /* nonnull */);
+
     status_t configure(
             const sp<AMessage> &format,
             const sp<Surface> &nativeWindow,
@@ -119,6 +125,7 @@
             const uint8_t key[16],
             const uint8_t iv[16],
             CryptoPlugin::Mode mode,
+            const CryptoPlugin::Pattern &pattern,
             int64_t presentationTimeUs,
             uint32_t flags,
             AString *errorDetailMsg = NULL);
@@ -247,6 +254,8 @@
     struct BufferInfo {
         uint32_t mBufferID;
         sp<ABuffer> mData;
+        sp<NativeHandle> mNativeHandle;
+        sp<RefBase> mMemRef;
         sp<ABuffer> mEncryptedData;
         sp<IMemory> mSharedEncryptedBuffer;
         sp<AMessage> mNotify;
@@ -339,6 +348,8 @@
 
     MediaCodec(const sp<ALooper> &looper, pid_t pid);
 
+    static sp<CodecBase> GetCodecBase(const AString &name, bool nameIsType = false);
+
     static status_t PostAndAwaitResponse(
             const sp<AMessage> &msg, sp<AMessage> *response);
 
@@ -347,8 +358,8 @@
     status_t init(const AString &name, bool nameIsType, bool encoder);
 
     void setState(State newState);
-    void returnBuffersToCodec();
-    void returnBuffersToCodecOnPort(int32_t portIndex);
+    void returnBuffersToCodec(bool isReclaim = false);
+    void returnBuffersToCodecOnPort(int32_t portIndex, bool isReclaim = false);
     size_t updateBuffers(int32_t portIndex, const sp<AMessage> &msg);
     status_t onQueueInputBuffer(const sp<AMessage> &msg);
     status_t onReleaseOutputBuffer(const sp<AMessage> &msg);
@@ -383,7 +394,7 @@
     bool isExecuting() const;
 
     uint64_t getGraphicBufferSize();
-    void addResource(const String8 &type, const String8 &subtype, uint64_t value);
+    void addResource(MediaResource::Type type, MediaResource::SubType subtype, uint64_t value);
 
     bool hasPendingBuffer(int portIndex);
     bool hasPendingBuffer();
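
An illustrative call to the new static QueryCapabilities() helper; the component and MIME names here are examples, not values mandated by the patch:

    sp<MediaCodecInfo::Capabilities> caps;
    status_t err = MediaCodec::QueryCapabilities(
            AString("OMX.google.h264.decoder"), AString(MEDIA_MIMETYPE_VIDEO_AVC),
            false /* isEncoder */, &caps);
    if (err == OK && caps != NULL) {
        // Inspect supported profile/level pairs, color formats, etc.
    }
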
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index bf4db87..44dbde0 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -65,6 +65,22 @@
     // only to be used by MediaPlayerService
     void parseTopLevelXMLFile(const char *path, bool ignore_errors = false);
 
+    enum Flags {
+        kPreferSoftwareCodecs   = 1,
+        kHardwareCodecsOnly     = 2,
+    };
+
+    static void findMatchingCodecs(
+            const char *mime,
+            bool createEncoder,
+            uint32_t flags,
+            Vector<AString> *matching);
+
+    static uint32_t getQuirksFor(const char *mComponentName);
+
+    static bool isSoftwareCodec(const AString &componentName);
+
+
 private:
     class BinderDeathObserver : public IBinder::DeathRecipient {
         void binderDied(const wp<IBinder> &the_late_who __unused);
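
A sketch of how the new static helpers might be used together (the MIME constant comes from MediaDefs.h; the logging is illustrative):

    Vector<AString> matching;
    MediaCodecList::findMatchingCodecs(
            MEDIA_MIMETYPE_VIDEO_AVC, false /* createEncoder */,
            MediaCodecList::kPreferSoftwareCodecs, &matching);
    for (size_t i = 0; i < matching.size(); ++i) {
        ALOGV("candidate: %s (software: %d, quirks: 0x%x)",
                matching[i].c_str(),
                MediaCodecList::isSoftwareCodec(matching[i]),
                MediaCodecList::getQuirksFor(matching[i].c_str()));
    }
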
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index 71f58a9..cc62786 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -19,23 +19,25 @@
 
 #include <media/stagefright/foundation/ABase.h>
 #include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/stagefright/foundation/Mutexed.h>
 #include <media/stagefright/MediaSource.h>
 
+#include <gui/IGraphicBufferConsumer.h>
+
 namespace android {
 
 struct ALooper;
-class AMessage;
+struct AMessage;
 struct AReplyToken;
 class IGraphicBufferProducer;
-class IGraphicBufferConsumer;
-class MediaCodec;
+struct MediaCodec;
 class MetaData;
 
 struct MediaCodecSource : public MediaSource,
                           public MediaBufferObserver {
     enum FlagBits {
         FLAG_USE_SURFACE_INPUT      = 1,
-        FLAG_USE_METADATA_INPUT     = 2,
+        FLAG_PREFER_SOFTWARE_CODEC  = 4,  // used for testing only
     };
 
     static sp<MediaCodecSource> Create(
@@ -47,12 +49,13 @@
 
     bool isVideo() const { return mIsVideo; }
     sp<IGraphicBufferProducer> getGraphicBufferProducer();
+    void setInputBufferTimeOffset(int64_t timeOffsetUs);
 
     // MediaSource
     virtual status_t start(MetaData *params = NULL);
     virtual status_t stop();
     virtual status_t pause();
-    virtual sp<MetaData> getFormat() { return mMeta; }
+    virtual sp<MetaData> getFormat();
     virtual status_t read(
             MediaBuffer **buffer,
             const ReadOptions *options = NULL);
@@ -75,6 +78,8 @@
         kWhatStart,
         kWhatStop,
         kWhatPause,
+        kWhatSetInputBufferTimeOffset,
+        kWhatStopStalled,
     };
 
     MediaCodecSource(
@@ -99,7 +104,7 @@
     sp<ALooper> mCodecLooper;
     sp<AHandlerReflector<MediaCodecSource> > mReflector;
     sp<AMessage> mOutputFormat;
-    sp<MetaData> mMeta;
+    Mutexed<sp<MetaData>> mMeta;
     sp<Puller> mPuller;
     sp<MediaCodec> mEncoder;
     uint32_t mFlags;
@@ -109,25 +114,30 @@
     bool mStopping;
     bool mDoMoreWorkPending;
     bool mSetEncoderFormat;
-    int mEncoderFormat;
-    int mEncoderDataSpace;
+    int32_t mEncoderFormat;
+    int32_t mEncoderDataSpace;
     sp<AMessage> mEncoderActivityNotify;
     sp<IGraphicBufferProducer> mGraphicBufferProducer;
     sp<IGraphicBufferConsumer> mGraphicBufferConsumer;
     List<MediaBuffer *> mInputBufferQueue;
     List<size_t> mAvailEncoderInputIndices;
     List<int64_t> mDecodingTimeQueue; // decoding time (us) for video
+    int64_t mInputBufferTimeOffsetUs;
 
     // audio drift time
     int64_t mFirstSampleTimeUs;
     List<int64_t> mDriftTimeQueue;
 
-    // following variables are protected by mOutputBufferLock
-    Mutex mOutputBufferLock;
-    Condition mOutputBufferCond;
-    List<MediaBuffer*> mOutputBufferQueue;
-    bool mEncoderReachedEOS;
-    status_t mErrorCode;
+    struct Output {
+        Output();
+        List<MediaBuffer*> mBufferQueue;
+        bool mEncoderReachedEOS;
+        status_t mErrorCode;
+        Condition mCond;
+    };
+    Mutexed<Output> mOutput;
+
+    int32_t mGeneration;
 
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodecSource);
 };
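
The switch from an explicit mutex/condition pair to Mutexed<Output> bundles the shared state with its lock. A sketch of the access pattern implied by foundation/Mutexed.h (the real MediaCodecSource::read() logic is more involved):

    Mutexed<Output>::Locked output(mOutput);  // holds the lock until scope exit
    while (output->mBufferQueue.empty() && !output->mEncoderReachedEOS) {
        output.waitForCondition(output->mCond);  // atomically unlocks while waiting
    }
    if (!output->mBufferQueue.empty()) {
        MediaBuffer *mbuf = *output->mBufferQueue.begin();
        output->mBufferQueue.erase(output->mBufferQueue.begin());
        // ... hand mbuf to the caller ...
    }
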
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 21eb04a..5f2a32d 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -30,6 +30,7 @@
 extern const char *MEDIA_MIMETYPE_VIDEO_H263;
 extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
 extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
+extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
 
 extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
 extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
@@ -64,8 +65,18 @@
 extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
 extern const char *MEDIA_MIMETYPE_TEXT_VTT;
 extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
 extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
 
+// These are values exported to the Java API that need to be kept in sync with
+// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
+// they are not defined in frameworks/av, so we define them here.
+enum AudioEncoding {
+    kAudioEncodingPcm16bit = 2,
+    kAudioEncodingPcm8bit = 3,
+    kAudioEncodingPcmFloat = 4,
+};
+
 }  // namespace android
 
 #endif  // MEDIA_DEFS_H_
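
The enum above mirrors AudioFormat.ENCODING_PCM_16BIT (2), ENCODING_PCM_8BIT (3) and ENCODING_PCM_FLOAT (4) from the Java API. A small helper, not part of the patch, illustrating the intended mapping:

    static inline size_t bytesPerSample(AudioEncoding e) {
        switch (e) {
            case kAudioEncodingPcm8bit:  return 1;
            case kAudioEncodingPcm16bit: return 2;
            case kAudioEncodingPcmFloat: return sizeof(float);  // 4
            default:                     return 0;  // unknown encoding
        }
    }
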
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index 183933a..6bf8c9e 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -18,7 +18,8 @@
 
 #define MEDIA_EXTRACTOR_H_
 
-#include <utils/RefBase.h>
+#include <media/IMediaExtractor.h>
+#include <media/IMediaSource.h>
 
 namespace android {
 
@@ -26,13 +27,15 @@
 class MediaSource;
 class MetaData;
 
-class MediaExtractor : public RefBase {
+class MediaExtractor : public BnMediaExtractor {
 public:
-    static sp<MediaExtractor> Create(
+    static sp<IMediaExtractor> Create(
+            const sp<DataSource> &source, const char *mime = NULL);
+    static sp<MediaExtractor> CreateFromService(
             const sp<DataSource> &source, const char *mime = NULL);
 
     virtual size_t countTracks() = 0;
-    virtual sp<MediaSource> getTrack(size_t index) = 0;
+    virtual sp<IMediaSource> getTrack(size_t index) = 0;
 
     enum GetTrackMetaDataFlags {
         kIncludeExtensiveMetaData = 1
@@ -68,8 +71,10 @@
     virtual void setUID(uid_t uid) {
     }
 
+    virtual const char * name() { return "<unspecified>"; }
+
 protected:
-    MediaExtractor() : mIsDrm(false) {}
+    MediaExtractor();
     virtual ~MediaExtractor() {}
 
 private:
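
With MediaExtractor now deriving from BnMediaExtractor, callers hold binder interfaces rather than the concrete class. A hedged sketch (the file path is an example):

    sp<DataSource> source = new FileSource("/sdcard/clip.mp4");  // example path
    sp<IMediaExtractor> extractor = MediaExtractor::Create(source);
    if (extractor != NULL) {
        for (size_t i = 0; i < extractor->countTracks(); ++i) {
            sp<IMediaSource> track = extractor->getTrack(i);
            // ... read samples from the track ...
        }
    }
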
diff --git a/include/media/stagefright/MediaSource.h b/include/media/stagefright/MediaSource.h
index a653db9..1bd3ed0 100644
--- a/include/media/stagefright/MediaSource.h
+++ b/include/media/stagefright/MediaSource.h
@@ -20,6 +20,7 @@
 
 #include <sys/types.h>
 
+#include <media/IMediaSource.h>
 #include <media/stagefright/MediaErrors.h>
 #include <utils/RefBase.h>
 #include <utils/Vector.h>
@@ -29,7 +30,7 @@
 class MediaBuffer;
 class MetaData;
 
-struct MediaSource : public virtual RefBase {
+struct MediaSource : public BnMediaSource {
     MediaSource();
 
     // To be called before any other methods on this object, except
@@ -47,8 +48,6 @@
     // Returns the format of the data output by this media source.
     virtual sp<MetaData> getFormat() = 0;
 
-    struct ReadOptions;
-
     // Returns a new buffer of data. Call blocks until a
     // buffer is available, an error is encountered or the end of the stream
     // is reached.
@@ -59,48 +58,10 @@
     virtual status_t read(
             MediaBuffer **buffer, const ReadOptions *options = NULL) = 0;
 
-    // Options that modify read() behaviour. The default is to
-    // a) not request a seek
-    // b) not be late, i.e. lateness_us = 0
-    struct ReadOptions {
-        enum SeekMode {
-            SEEK_PREVIOUS_SYNC,
-            SEEK_NEXT_SYNC,
-            SEEK_CLOSEST_SYNC,
-            SEEK_CLOSEST,
-        };
-
-        ReadOptions();
-
-        // Reset everything back to defaults.
-        void reset();
-
-        void setSeekTo(int64_t time_us, SeekMode mode = SEEK_CLOSEST_SYNC);
-        void clearSeekTo();
-        bool getSeekTo(int64_t *time_us, SeekMode *mode) const;
-
-        void setLateBy(int64_t lateness_us);
-        int64_t getLateBy() const;
-
-        void setNonBlocking();
-        void clearNonBlocking();
-        bool getNonBlocking() const;
-
-    private:
-        enum Options {
-            kSeekTo_Option      = 1,
-        };
-
-        uint32_t mOptions;
-        int64_t mSeekTimeUs;
-        SeekMode mSeekMode;
-        int64_t mLatenessUs;
-        bool mNonBlocking;
-    };
-
     // Causes this source to suspend pulling data from its upstream source
-    // until a subsequent read-with-seek. Currently only supported by
-    // OMXCodec.
+    // until a subsequent read-with-seek. This is currently not supported
+    // as such by any source; e.g., MediaCodecSource does not suspend its
+    // upstream source, and instead discards upstream data while paused.
     virtual status_t pause() {
         return ERROR_UNSUPPORTED;
     }
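
ReadOptions now comes from IMediaSource (via BnMediaSource) instead of being declared here, but the calling pattern is unchanged, assuming setSeekTo() keeps its signature there. An illustrative read-with-seek:

    MediaSource::ReadOptions options;
    options.setSeekTo(5 * 1000000LL /* 5 s */, MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);

    MediaBuffer *buffer = NULL;
    status_t err = source->read(&buffer, &options);  // |source| is an sp<MediaSource>
    if (err == OK) {
        // ... consume the buffer ...
        buffer->release();
    }
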
diff --git a/include/media/stagefright/MediaWriter.h b/include/media/stagefright/MediaWriter.h
index 8e02506..b6476c9 100644
--- a/include/media/stagefright/MediaWriter.h
+++ b/include/media/stagefright/MediaWriter.h
@@ -20,10 +20,10 @@
 
 #include <utils/RefBase.h>
 #include <media/IMediaRecorderClient.h>
+#include <media/IMediaSource.h>
 
 namespace android {
 
-struct MediaSource;
 class MetaData;
 
 struct MediaWriter : public RefBase {
@@ -32,7 +32,7 @@
           mMaxFileDurationLimitUs(0) {
     }
 
-    virtual status_t addSource(const sp<MediaSource> &source) = 0;
+    virtual status_t addSource(const sp<IMediaSource> &source) = 0;
     virtual bool reachedEOS() = 0;
     virtual status_t start(MetaData *params = NULL) = 0;
     virtual status_t stop() = 0;
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 8d4e15a..be7e5c1 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -22,6 +22,7 @@
 
 #include <stdint.h>
 
+#include <binder/Parcel.h>
 #include <utils/RefBase.h>
 #include <utils/KeyedVector.h>
 #include <utils/String8.h>
@@ -48,8 +49,11 @@
     kKeyChannelCount      = '#chn',  // int32_t
     kKeyChannelMask       = 'chnm',  // int32_t
     kKeySampleRate        = 'srte',  // int32_t (audio sampling rate Hz)
+    kKeyPcmEncoding       = 'PCMe',  // int32_t (audio encoding enum)
     kKeyFrameRate         = 'frmR',  // int32_t (video frame rate fps)
     kKeyBitRate           = 'brte',  // int32_t (bps)
+    kKeyMaxBitRate        = 'mxBr',  // int32_t (bps)
+    kKeyStreamHeader      = 'stHd',  // raw data
     kKeyESDS              = 'esds',  // raw data
     kKeyAACProfile        = 'aacp',  // int32_t
     kKeyAVCC              = 'avcc',  // raw data
@@ -60,6 +64,7 @@
     kKeyOpusHeader        = 'ohdr',  // raw data
     kKeyOpusCodecDelay    = 'ocod',  // uint64_t (codec delay in ns)
     kKeyOpusSeekPreRoll   = 'ospr',  // uint64_t (seek preroll in ns)
+    kKeyVp9CodecPrivate   = 'vp9p',  // raw data (vp9 csd information)
     kKeyWantsNALFragments = 'NALf',
     kKeyIsSyncFrame       = 'sync',  // int32_t (bool)
     kKeyIsCodecConfig     = 'conf',  // int32_t (bool)
@@ -181,6 +186,24 @@
 
     // H264 supplemental enhancement information offsets/sizes
     kKeySEI               = 'sei ', // raw data
+
+    // MPEG user data offsets
+    kKeyMpegUserData      = 'mpud', // size_t[]
+
+    // Size of NALU length in mkv/mp4
+    kKeyNalLengthSize     = 'nals', // int32_t
+
+    // HDR related
+    kKeyHdrStaticInfo    = 'hdrS', // HDRStaticInfo
+
+    // color aspects
+    kKeyColorRange       = 'cRng', // int32_t, color range, value defined by ColorAspects.Range
+    kKeyColorPrimaries   = 'cPrm', // int32_t,
+                                   // color Primaries, value defined by ColorAspects.Primaries
+    kKeyTransferFunction = 'tFun', // int32_t,
+                                   // transfer Function, value defined by ColorAspects.Transfer.
+    kKeyColorMatrix      = 'cMtx', // int32_t,
+                                   // color Matrix, value defined by ColorAspects.MatrixCoeffs.
 };
 
 enum {
@@ -237,8 +260,13 @@
 
     bool hasData(uint32_t key) const;
 
+    String8 toString() const;
     void dumpToLog() const;
 
+    status_t writeToParcel(Parcel &parcel);
+    status_t updateFromParcel(const Parcel &parcel);
+    static sp<MetaData> createFromParcel(const Parcel &parcel);
+
 protected:
     virtual ~MetaData();
 
@@ -253,7 +281,8 @@
         void clear();
         void setData(uint32_t type, const void *data, size_t size);
         void getData(uint32_t *type, const void **data, size_t *size) const;
-        String8 asString() const;
+        // may include hexdump of binary data if verbose=true
+        String8 asString(bool verbose) const;
 
     private:
         uint32_t mType;
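
A minimal round trip through the new parcel helpers, assuming a populated sp<MetaData> named meta (error handling elided):

    Parcel parcel;
    meta->writeToParcel(parcel);
    parcel.setDataPosition(0);  // rewind before reading back
    sp<MetaData> copy = MetaData::createFromParcel(parcel);
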
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index fd74452..03e2185 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -19,6 +19,7 @@
 
 #include <media/stagefright/foundation/ABase.h>
 #include <media/stagefright/MediaSource.h>
+#include <media/IMediaExtractor.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
@@ -43,6 +44,11 @@
         SAMPLE_FLAG_ENCRYPTED   = 2,
     };
 
+    // identical to IMediaExtractor::GetTrackMetaDataFlags
+    enum GetTrackFormatFlags {
+        kIncludeExtensiveMetaData = 1, // reads sample table and possibly stream headers
+    };
+
     NuMediaExtractor();
 
     status_t setDataSource(
@@ -55,7 +61,7 @@
     status_t setDataSource(const sp<DataSource> &datasource);
 
     size_t countTracks() const;
-    status_t getTrackFormat(size_t index, sp<AMessage> *format) const;
+    status_t getTrackFormat(size_t index, sp<AMessage> *format, uint32_t flags = 0) const;
 
     status_t getFileFormat(sp<AMessage> *format) const;
 
@@ -83,8 +89,12 @@
         kIsVorbis       = 1,
     };
 
+    enum {
+        kMaxTrackCount = 16384,
+    };
+
     struct TrackInfo {
-        sp<MediaSource> mSource;
+        sp<IMediaSource> mSource;
         size_t mTrackIndex;
         status_t mFinalResult;
         MediaBuffer *mSample;
@@ -97,7 +107,7 @@
 
     sp<DataSource> mDataSource;
 
-    sp<MediaExtractor> mImpl;
+    sp<IMediaExtractor> mImpl;
     bool mIsWidevineExtractor;
 
     Vector<TrackInfo> mSelectedTracks;
@@ -112,7 +122,8 @@
     void releaseTrackSamples();
 
     bool getTotalBitrate(int64_t *bitRate) const;
-    void updateDurationAndBitrate();
+    status_t updateDurationAndBitrate();
+    status_t appendVorbisNumPageSamples(TrackInfo *info, const sp<ABuffer> &buffer);
 
     DISALLOW_EVIL_CONSTRUCTORS(NuMediaExtractor);
 };
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
deleted file mode 100644
index b0404aa..0000000
--- a/include/media/stagefright/OMXCodec.h
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OMX_CODEC_H_
-
-#define OMX_CODEC_H_
-
-#include <android/native_window.h>
-#include <media/IOMX.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
-#include <utils/threads.h>
-
-#include <OMX_Audio.h>
-
-namespace android {
-
-struct MediaCodecInfo;
-class MemoryDealer;
-struct OMXCodecObserver;
-struct CodecProfileLevel;
-class SkipCutBuffer;
-
-struct OMXCodec : public MediaSource,
-                  public MediaBufferObserver {
-    enum CreationFlags {
-        kPreferSoftwareCodecs    = 1,
-        kIgnoreCodecSpecificData = 2,
-
-        // Request for software or hardware codecs. If request
-        // can not be fullfilled, Create() returns NULL.
-        kSoftwareCodecsOnly      = 8,
-        kHardwareCodecsOnly      = 16,
-
-        // Store meta data in video buffers
-        kStoreMetaDataInVideoBuffers = 32,
-
-        // Only submit one input buffer at one time.
-        kOnlySubmitOneInputBufferAtOneTime = 64,
-
-        // Enable GRALLOC_USAGE_PROTECTED for output buffers from native window
-        kEnableGrallocUsageProtected = 128,
-
-        // Secure decoding mode
-        kUseSecureInputBuffers = 256,
-    };
-    static sp<MediaSource> Create(
-            const sp<IOMX> &omx,
-            const sp<MetaData> &meta, bool createEncoder,
-            const sp<MediaSource> &source,
-            const char *matchComponentName = NULL,
-            uint32_t flags = 0,
-            const sp<ANativeWindow> &nativeWindow = NULL);
-
-    static void setComponentRole(
-            const sp<IOMX> &omx, IOMX::node_id node, bool isEncoder,
-            const char *mime);
-
-    virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-    virtual status_t pause();
-
-    // from MediaBufferObserver
-    virtual void signalBufferReturned(MediaBuffer *buffer);
-
-    enum Quirks {
-        kNeedsFlushBeforeDisable              = 1,
-        kWantsNALFragments                    = 2,
-        kRequiresLoadedToIdleAfterAllocation  = 4,
-        kRequiresAllocateBufferOnInputPorts   = 8,
-        kRequiresFlushCompleteEmulation       = 16,
-        kRequiresAllocateBufferOnOutputPorts  = 32,
-        kRequiresFlushBeforeShutdown          = 64,
-        kDefersOutputBufferAllocation         = 128,
-        kDecoderLiesAboutNumberOfChannels     = 256,
-        kInputBufferSizesAreBogus             = 512,
-        kSupportsMultipleFramesPerInputBuffer = 1024,
-        kRequiresLargerEncoderOutputBuffer    = 2048,
-        kOutputBuffersAreUnreadable           = 4096,
-    };
-
-    struct CodecNameAndQuirks {
-        String8 mName;
-        uint32_t mQuirks;
-    };
-
-    // for use by ACodec
-    static void findMatchingCodecs(
-            const char *mime,
-            bool createEncoder, const char *matchComponentName,
-            uint32_t flags,
-            Vector<CodecNameAndQuirks> *matchingCodecNamesAndQuirks);
-
-    static uint32_t getComponentQuirks(
-            const sp<MediaCodecInfo> &list);
-
-    static bool findCodecQuirks(const char *componentName, uint32_t *quirks);
-
-protected:
-    virtual ~OMXCodec();
-
-private:
-
-    // Make sure mLock is accessible to OMXCodecObserver
-    friend class OMXCodecObserver;
-
-    // Call this with mLock hold
-    void on_message(const omx_message &msg);
-
-    enum State {
-        DEAD,
-        LOADED,
-        LOADED_TO_IDLE,
-        IDLE_TO_EXECUTING,
-        EXECUTING,
-        EXECUTING_TO_IDLE,
-        IDLE_TO_LOADED,
-        RECONFIGURING,
-        ERROR
-    };
-
-    enum {
-        kPortIndexInput  = 0,
-        kPortIndexOutput = 1
-    };
-
-    enum PortStatus {
-        ENABLED,
-        DISABLING,
-        DISABLED,
-        ENABLING,
-        SHUTTING_DOWN,
-    };
-
-    enum BufferStatus {
-        OWNED_BY_US,
-        OWNED_BY_COMPONENT,
-        OWNED_BY_NATIVE_WINDOW,
-        OWNED_BY_CLIENT,
-    };
-
-    struct BufferInfo {
-        IOMX::buffer_id mBuffer;
-        BufferStatus mStatus;
-        sp<IMemory> mMem;
-        size_t mSize;
-        void *mData;
-        MediaBuffer *mMediaBuffer;
-    };
-
-    struct CodecSpecificData {
-        size_t mSize;
-        uint8_t mData[1];
-    };
-
-    sp<IOMX> mOMX;
-    bool mOMXLivesLocally;
-    IOMX::node_id mNode;
-    uint32_t mQuirks;
-
-    // Flags specified in the creation of the codec.
-    uint32_t mFlags;
-
-    bool mIsEncoder;
-    bool mIsVideo;
-    char *mMIME;
-    char *mComponentName;
-    sp<MetaData> mOutputFormat;
-    sp<MediaSource> mSource;
-    Vector<CodecSpecificData *> mCodecSpecificData;
-    size_t mCodecSpecificDataIndex;
-
-    sp<MemoryDealer> mDealer[2];
-
-    State mState;
-    Vector<BufferInfo> mPortBuffers[2];
-    PortStatus mPortStatus[2];
-    bool mInitialBufferSubmit;
-    bool mSignalledEOS;
-    status_t mFinalStatus;
-    bool mNoMoreOutputData;
-    bool mOutputPortSettingsHaveChanged;
-    int64_t mSeekTimeUs;
-    ReadOptions::SeekMode mSeekMode;
-    int64_t mTargetTimeUs;
-    bool mOutputPortSettingsChangedPending;
-    sp<SkipCutBuffer> mSkipCutBuffer;
-
-    MediaBuffer *mLeftOverBuffer;
-
-    Mutex mLock;
-    Condition mAsyncCompletion;
-
-    bool mPaused;
-
-    sp<ANativeWindow> mNativeWindow;
-
-    // The index in each of the mPortBuffers arrays of the buffer that will be
-    // submitted to OMX next.  This only applies when using buffers from a
-    // native window.
-    size_t mNextNativeBufferIndex[2];
-
-    // A list of indices into mPortStatus[kPortIndexOutput] filled with data.
-    List<size_t> mFilledBuffers;
-    Condition mBufferFilled;
-
-    // Used to record the decoding time for an output picture from
-    // a video encoder.
-    List<int64_t> mDecodingTimeList;
-
-    OMXCodec(const sp<IOMX> &omx, IOMX::node_id node,
-             uint32_t quirks, uint32_t flags,
-             bool isEncoder, const char *mime, const char *componentName,
-             const sp<MediaSource> &source,
-             const sp<ANativeWindow> &nativeWindow);
-
-    void addCodecSpecificData(const void *data, size_t size);
-    void clearCodecSpecificData();
-
-    void setComponentRole();
-
-    void setAMRFormat(bool isWAMR, int32_t bitRate);
-
-    status_t setAACFormat(
-            int32_t numChannels, int32_t sampleRate, int32_t bitRate,
-            int32_t aacProfile, bool isADTS);
-
-    status_t setAC3Format(int32_t numChannels, int32_t sampleRate);
-
-    void setG711Format(int32_t sampleRate, int32_t numChannels);
-
-    status_t setVideoPortFormatType(
-            OMX_U32 portIndex,
-            OMX_VIDEO_CODINGTYPE compressionFormat,
-            OMX_COLOR_FORMATTYPE colorFormat);
-
-    void setVideoInputFormat(
-            const char *mime, const sp<MetaData>& meta);
-
-    status_t setupBitRate(int32_t bitRate);
-    status_t setupErrorCorrectionParameters();
-    status_t setupH263EncoderParameters(const sp<MetaData>& meta);
-    status_t setupMPEG4EncoderParameters(const sp<MetaData>& meta);
-    status_t setupAVCEncoderParameters(const sp<MetaData>& meta);
-    status_t findTargetColorFormat(
-            const sp<MetaData>& meta, OMX_COLOR_FORMATTYPE *colorFormat);
-
-    status_t isColorFormatSupported(
-            OMX_COLOR_FORMATTYPE colorFormat, int portIndex);
-
-    // If profile/level is set in the meta data, its value in the meta
-    // data will be used; otherwise, the default value will be used.
-    status_t getVideoProfileLevel(const sp<MetaData>& meta,
-            const CodecProfileLevel& defaultProfileLevel,
-            CodecProfileLevel& profileLevel);
-
-    status_t setVideoOutputFormat(
-            const char *mime, const sp<MetaData>& meta);
-
-    void setImageOutputFormat(
-            OMX_COLOR_FORMATTYPE format, OMX_U32 width, OMX_U32 height);
-
-    void setJPEGInputFormat(
-            OMX_U32 width, OMX_U32 height, OMX_U32 compressedSize);
-
-    void setMinBufferSize(OMX_U32 portIndex, OMX_U32 size);
-
-    void setRawAudioFormat(
-            OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels);
-
-    status_t allocateBuffers();
-    status_t allocateBuffersOnPort(OMX_U32 portIndex);
-    status_t allocateOutputBuffersFromNativeWindow();
-
-    status_t queueBufferToNativeWindow(BufferInfo *info);
-    status_t cancelBufferToNativeWindow(BufferInfo *info);
-    BufferInfo* dequeueBufferFromNativeWindow();
-
-    status_t freeBuffersOnPort(
-            OMX_U32 portIndex, bool onlyThoseWeOwn = false);
-
-    status_t freeBuffer(OMX_U32 portIndex, size_t bufIndex);
-
-    bool drainInputBuffer(IOMX::buffer_id buffer);
-    void fillOutputBuffer(IOMX::buffer_id buffer);
-    bool drainInputBuffer(BufferInfo *info);
-    void fillOutputBuffer(BufferInfo *info);
-
-    void drainInputBuffers();
-    void fillOutputBuffers();
-
-    bool drainAnyInputBuffer();
-    BufferInfo *findInputBufferByDataPointer(void *ptr);
-    BufferInfo *findEmptyInputBuffer();
-
-    // Returns true iff a flush was initiated and a completion event is
-    // upcoming, false otherwise (A flush was not necessary as we own all
-    // the buffers on that port).
-    // This method will ONLY ever return false for a component with quirk
-    // "kRequiresFlushCompleteEmulation".
-    bool flushPortAsync(OMX_U32 portIndex);
-
-    void disablePortAsync(OMX_U32 portIndex);
-    status_t enablePortAsync(OMX_U32 portIndex);
-
-    static size_t countBuffersWeOwn(const Vector<BufferInfo> &buffers);
-    static bool isIntermediateState(State state);
-
-    void onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2);
-    void onCmdComplete(OMX_COMMANDTYPE cmd, OMX_U32 data);
-    void onStateChange(OMX_STATETYPE newState);
-    void onPortSettingsChanged(OMX_U32 portIndex);
-
-    void setState(State newState);
-
-    status_t init();
-    void initOutputFormat(const sp<MetaData> &inputFormat);
-    status_t initNativeWindow();
-
-    void initNativeWindowCrop();
-
-    void dumpPortStatus(OMX_U32 portIndex);
-
-    status_t configureCodec(const sp<MetaData> &meta);
-
-    status_t waitForBufferFilled_l();
-
-    int64_t getDecodingTimeUs();
-
-    status_t parseHEVCCodecSpecificData(
-            const void *data, size_t size,
-            unsigned *profile, unsigned *level);
-    status_t parseAVCCodecSpecificData(
-            const void *data, size_t size,
-            unsigned *profile, unsigned *level);
-
-    status_t stopOmxComponent_l();
-
-    OMXCodec(const OMXCodec &);
-    OMXCodec &operator=(const OMXCodec &);
-};
-
-struct CodecCapabilities {
-    enum {
-        kFlagSupportsAdaptivePlayback = 1 << 0,
-    };
-
-    String8 mComponentName;
-    Vector<CodecProfileLevel> mProfileLevels;
-    Vector<OMX_U32> mColorFormats;
-    uint32_t mFlags;
-};
-
-// Return a vector of componentNames with supported profile/level pairs
-// supporting the given mime type, if queryDecoders==true, returns components
-// that decode content of the given type, otherwise returns components
-// that encode content of the given type.
-// profile and level indications only make sense for h.263, mpeg4 and avc
-// video.
-// If hwCodecOnly==true, only returns hardware-based components, software and
-// hardware otherwise.
-// The profile/level values correspond to
-// OMX_VIDEO_H263PROFILETYPE, OMX_VIDEO_MPEG4PROFILETYPE,
-// OMX_VIDEO_AVCPROFILETYPE, OMX_VIDEO_H263LEVELTYPE, OMX_VIDEO_MPEG4LEVELTYPE
-// and OMX_VIDEO_AVCLEVELTYPE respectively.
-
-status_t QueryCodecs(
-        const sp<IOMX> &omx,
-        const char *mimeType, bool queryDecoders, bool hwCodecOnly,
-        Vector<CodecCapabilities> *results);
-
-status_t QueryCodecs(
-        const sp<IOMX> &omx,
-        const char *mimeType, bool queryDecoders,
-        Vector<CodecCapabilities> *results);
-
-status_t QueryCodec(
-        const sp<IOMX> &omx,
-        const char *componentName, const char *mime,
-        bool isEncoder,
-        CodecCapabilities *caps);
-
-status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]);
-
-}  // namespace android
-
-#endif  // OMX_CODEC_H_
diff --git a/include/media/stagefright/ProcessInfo.h b/include/media/stagefright/ProcessInfo.h
index ec0cdff..0be1a52 100644
--- a/include/media/stagefright/ProcessInfo.h
+++ b/include/media/stagefright/ProcessInfo.h
@@ -27,6 +27,7 @@
     ProcessInfo();
 
     virtual bool getPriority(int pid, int* priority);
+    virtual bool isValidPid(int pid);
 
 protected:
     virtual ~ProcessInfo();
diff --git a/include/media/stagefright/ProcessInfoInterface.h b/include/media/stagefright/ProcessInfoInterface.h
index 222f92d..b39112a 100644
--- a/include/media/stagefright/ProcessInfoInterface.h
+++ b/include/media/stagefright/ProcessInfoInterface.h
@@ -23,6 +23,7 @@
 
 struct ProcessInfoInterface : public RefBase {
     virtual bool getPriority(int pid, int* priority) = 0;
+    virtual bool isValidPid(int pid) = 0;
 
 protected:
     virtual ~ProcessInfoInterface() {}
diff --git a/include/media/stagefright/SimpleDecodingSource.h b/include/media/stagefright/SimpleDecodingSource.h
new file mode 100644
index 0000000..534097b
--- /dev/null
+++ b/include/media/stagefright/SimpleDecodingSource.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SIMPLE_DECODING_SOURCE_H_
+#define SIMPLE_DECODING_SOURCE_H_
+
+#include <system/window.h>
+
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/Mutexed.h>
+
+#include <utils/Condition.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+struct ALooper;
+struct AMessage;
+class MediaBuffer;
+struct MediaCodec;
+class MetaData;
+
+class SimpleDecodingSource : public MediaSource {
+public:
+    // Creates a MediaSource that uses MediaCodec to decode a compressed input |source|.
+    // The selected codec can be influenced using |flags|. This source only supports the
+    // kPreferSoftwareCodecs and kHardwareCodecsOnly flags defined by MediaCodecList.
+    // You can pass in a target |nativeWindow| to render video directly onto a surface. In this
+    // case the source will return empty buffers.
+    // This source cannot be restarted (hence the name "Simple"); all reads are blocking, and
+    // it does not support secure input or pausing.
+    // If |desiredCodec| is given, that specific codec is used.
+    static sp<SimpleDecodingSource> Create(
+            const sp<IMediaSource> &source, uint32_t flags = 0,
+            const sp<ANativeWindow> &nativeWindow = NULL,
+            const char *desiredCodec = NULL);
+
+    virtual ~SimpleDecodingSource();
+
+    // starts this source (and its underlying source). |params| is ignored.
+    virtual status_t start(MetaData *params = NULL);
+
+    // stops this source (and its underlying source).
+    virtual status_t stop();
+
+    // returns the output format of this source.
+    virtual sp<MetaData> getFormat();
+
+    // reads from the source. This call always blocks.
+    virtual status_t read(MediaBuffer **buffer, const ReadOptions *options);
+
+    // unsupported methods
+    virtual status_t pause() { return INVALID_OPERATION; }
+    virtual status_t setBuffers(const Vector<MediaBuffer *> &) { return INVALID_OPERATION; }
+
+private:
+    // Construct this using a codec, source and looper.
+    SimpleDecodingSource(
+            const sp<MediaCodec> &codec, const sp<IMediaSource> &source, const sp<ALooper> &looper,
+            bool usingSurface, const sp<AMessage> &format);
+
+    sp<MediaCodec> mCodec;
+    sp<IMediaSource> mSource;
+    sp<ALooper> mLooper;
+    bool mUsingSurface;
+    enum State {
+        INIT,
+        STARTED,
+        STOPPING,
+        STOPPED,
+        ERROR,
+    };
+    AString mComponentName;
+
+    struct ProtectedState {
+        ProtectedState(const sp<AMessage> &format);
+        bool mReading;
+        Condition mReadCondition;
+
+        sp<AMessage> mFormat;
+        State mState;
+        bool mQueuedInputEOS;
+        bool mGotOutputEOS;
+    };
+    Mutexed<ProtectedState> mProtectedState;
+
+    // do the actual reading
+    status_t doRead(
+            Mutexed<ProtectedState>::Locked &me, MediaBuffer **buffer, const ReadOptions *options);
+};
+
+} // namespace android
+
+#endif
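
A hedged end-to-end sketch of the new class; the extractor setup and track index are illustrative:

    sp<IMediaSource> track = extractor->getTrack(videoTrackIndex);
    sp<SimpleDecodingSource> decoded = SimpleDecodingSource::Create(track);
    if (decoded != NULL && decoded->start() == OK) {
        MediaBuffer *frame = NULL;
        while (decoded->read(&frame, NULL /* options */) == OK) {
            // ... consume one decoded frame ...
            frame->release();
        }
        decoded->stop();
    }
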
diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h
index 098aa69..61f9949 100644
--- a/include/media/stagefright/SkipCutBuffer.h
+++ b/include/media/stagefright/SkipCutBuffer.h
@@ -29,9 +29,10 @@
  */
 class SkipCutBuffer: public RefBase {
  public:
-    // 'skip' is the number of bytes to skip from the beginning
-    // 'cut' is the number of bytes to cut from the end
-    SkipCutBuffer(int32_t skip, int32_t cut);
+    // 'skip' is the number of frames to skip from the beginning
+    // 'cut' is the number of frames to cut from the end
+    // 'num16Channels' is the number of channels, which are assumed to be 16 bits wide each
+    SkipCutBuffer(size_t skip, size_t cut, size_t num16Channels);
 
     // Submit one MediaBuffer for skipping and cutting. This may consume all or
     // some of the data in the buffer, or it may add data to it.
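
With the constructor above now taking frame counts, byte offsets are derived internally; for instance, skipping 1024 frames of 16-bit stereo corresponds to 1024 frames × 2 channels × 2 bytes = 4096 bytes.
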
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 2177c00..ca3a3bf 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -25,6 +25,8 @@
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MediaBuffer.h>
 
+#include <MetadataBufferType.h>
+
 #include "foundation/ABase.h"
 
 namespace android {
@@ -109,9 +111,9 @@
     void dump(String8& result, const char* prefix, char* buffer,
                                                     size_t SIZE) const;
 
-    // isMetaDataStoredInVideoBuffers tells the encoder whether we will
-    // pass metadata through the buffers. Currently, it is force set to true
-    bool isMetaDataStoredInVideoBuffers() const;
+    // metaDataStoredInVideoBuffers tells the encoder what kind of metadata
+    // is passed through the buffers. Currently, it is set to ANWBuffer.
+    MetadataBufferType metaDataStoredInVideoBuffers() const;
 
     sp<IGraphicBufferProducer> getProducer() const { return mProducer; }
 
@@ -234,6 +236,9 @@
 
     Condition mMediaBuffersAvailableCondition;
 
+    // Allocate and return a new MediaBuffer and pass the ANW buffer as metadata into it.
+    void passMetadataBuffer_l(MediaBuffer **buffer, ANativeWindowBuffer *bufferHandle) const;
+
     // Avoid copying and equating and default constructor
     DISALLOW_EVIL_CONSTRUCTORS(SurfaceMediaSource);
 };
diff --git a/include/media/stagefright/SurfaceUtils.h b/include/media/stagefright/SurfaceUtils.h
index c1a9c0a..13d580c 100644
--- a/include/media/stagefright/SurfaceUtils.h
+++ b/include/media/stagefright/SurfaceUtils.h
@@ -24,9 +24,14 @@
 
 namespace android {
 
+/**
+ * Configures |nativeWindow| for the given |width|x|height|, pixel |format|, |rotation| and |usage|.
+ * If |reconnect| is true, reconnects to the native window beforehand.
+ * @return first error encountered, or NO_ERROR on success.
+ */
 status_t setNativeWindowSizeFormatAndUsage(
         ANativeWindow *nativeWindow /* nonnull */,
-        int width, int height, int format, int rotation, int usage);
+        int width, int height, int format, int rotation, int usage, bool reconnect);
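+
+// An illustrative call (the format and usage constants here are just examples):
+//   status_t err = setNativeWindowSizeFormatAndUsage(
+//           nativeWindow, 1280, 720, HAL_PIXEL_FORMAT_YV12,
+//           0 /* rotation */, GRALLOC_USAGE_SW_WRITE_OFTEN, false /* reconnect */);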
 status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */);
 
 } // namespace android
diff --git a/include/media/stagefright/TimeSource.h b/include/media/stagefright/TimeSource.h
deleted file mode 100644
index 8f11e14..0000000
--- a/include/media/stagefright/TimeSource.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TIME_SOURCE_H_
-
-#define TIME_SOURCE_H_
-
-#include <stdint.h>
-
-namespace android {
-
-class TimeSource {
-public:
-    TimeSource() {}
-    virtual ~TimeSource() {}
-
-    virtual int64_t getRealTimeUs() = 0;
-
-private:
-    TimeSource(const TimeSource &);
-    TimeSource &operator=(const TimeSource &);
-};
-
-class SystemTimeSource : public TimeSource {
-public:
-    SystemTimeSource();
-
-    virtual int64_t getRealTimeUs();
-
-private:
-    int64_t mStartTimeUs;
-};
-
-}  // namespace android
-
-#endif  // TIME_SOURCE_H_
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index 5e9d7d4..01b3e3f 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -48,6 +48,11 @@
 void convertMessageToMetaData(
         const sp<AMessage> &format, sp<MetaData> &meta);
 
+// Returns a pointer to the next NAL start code in the buffer of size |length| starting at |data|,
+// or a pointer to the end of the buffer if no start code is found.
+// TODO: combine this with avc_utils::getNextNALUnit
+const uint8_t *findNextNalStartCode(const uint8_t *data, size_t length);
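+//
+// Illustrative use (assuming |data| points just past a start code):
+//   const uint8_t *end = findNextNalStartCode(data, length);
+//   size_t nalSize = end - data;  // size of the current NAL unit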
+
 AString MakeUserAgent();
 
 // Convert a MIME type to a AudioSystem::audio_format
@@ -85,6 +90,8 @@
 void readFromAMessage(
         const sp<AMessage> &msg, AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
 
+AString nameForFd(int fd);
+
 }  // namespace android
 
 #endif  // UTILS_H_
diff --git a/include/media/stagefright/foundation/ABitReader.h b/include/media/stagefright/foundation/ABitReader.h
index c3bf0ff..a30dd2e 100644
--- a/include/media/stagefright/foundation/ABitReader.h
+++ b/include/media/stagefright/foundation/ABitReader.h
@@ -30,23 +30,44 @@
     ABitReader(const uint8_t *data, size_t size);
     virtual ~ABitReader();
 
-    uint32_t getBits(size_t n);
-    void skipBits(size_t n);
+    // Tries to get |n| bits. If not successful, returns |fallback|. Otherwise, returns result.
+    // Reading 0 bits will always succeed and return 0.
+    uint32_t getBitsWithFallback(size_t n, uint32_t fallback);
 
+    // Tries to get |n| bits. If not successful, returns false. Otherwise, stores result in |out|
+    // and returns true. Use !overRead() to determine if this call was successful. Reading 0 bits
+    // will always succeed and write 0 in |out|.
+    bool getBitsGraceful(size_t n, uint32_t *out);
+
+    // Gets |n| bits and returns result. ABORTS if unsuccessful. Reading 0 bits will always
+    // succeed.
+    uint32_t getBits(size_t n);
+
+    // Tries to skip |n| bits. Returns true iff successful. Skipping 0 bits will always succeed.
+    bool skipBits(size_t n);
+
+    // "Puts" |n| bits with the value |x| back virtually into the bit stream. The put-back bits
+    // are not actually written into the data, but are tracked in a separate buffer that can
+    // store at most 32 bits. This is a no-op if the stream has already been over-read.
     void putBits(uint32_t x, size_t n);
 
     size_t numBitsLeft() const;
 
     const uint8_t *data() const;
 
+    // Returns true iff the stream was over-read, i.e. a getBits operation failed because the
+    // data ran out (as opposed to an attempt to read more than 32 bits at once).
+    bool overRead() const { return mOverRead; }
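+
+    // A typical graceful-parsing pattern (illustrative):
+    //   ABitReader br(data, size);
+    //   uint32_t syncword = br.getBitsWithFallback(12, 0 /* fallback */);
+    //   uint32_t layer;
+    //   if (!br.getBitsGraceful(2, &layer) || br.overRead()) {
+    //       return ERROR_MALFORMED;  // the stream ran out of data
+    //   }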
+
 protected:
     const uint8_t *mData;
     size_t mSize;
 
     uint32_t mReservoir;  // left-aligned bits
     size_t mNumBitsLeft;
+    bool mOverRead;
 
-    virtual void fillReservoir();
+    virtual bool fillReservoir();
 
     DISALLOW_EVIL_CONSTRUCTORS(ABitReader);
 };
@@ -60,7 +81,7 @@
 private:
     int32_t mNumZeros;
 
-    virtual void fillReservoir();
+    virtual bool fillReservoir();
 
     DISALLOW_EVIL_CONSTRUCTORS(NALBitReader);
 };
diff --git a/include/media/stagefright/foundation/ABuffer.h b/include/media/stagefright/foundation/ABuffer.h
index 6294ee7..dc9c778 100644
--- a/include/media/stagefright/foundation/ABuffer.h
+++ b/include/media/stagefright/foundation/ABuffer.h
@@ -33,8 +33,6 @@
     ABuffer(size_t capacity);
     ABuffer(void *data, size_t capacity);
 
-    void setFarewellMessage(const sp<AMessage> msg);
-
     uint8_t *base() { return (uint8_t *)mData; }
     uint8_t *data() { return (uint8_t *)mData + mRangeOffset; }
     size_t capacity() const { return mCapacity; }
@@ -58,7 +56,6 @@
     virtual ~ABuffer();
 
 private:
-    sp<AMessage> mFarewell;
     sp<AMessage> mMeta;
 
     MediaBufferBase *mMediaBufferBase;
diff --git a/include/media/stagefright/foundation/ADebug.h b/include/media/stagefright/foundation/ADebug.h
index 65f415a..564b3f7 100644
--- a/include/media/stagefright/foundation/ADebug.h
+++ b/include/media/stagefright/foundation/ADebug.h
@@ -24,8 +24,9 @@
 #include <media/stagefright/foundation/AString.h>
 #include <utils/Log.h>
 
-inline static const char *asString(android::status_t i, const char *def = "??") {
-    using namespace android;
+namespace android {
+
+inline static const char *asString(status_t i, const char *def = "??") {
     switch (i) {
         case NO_ERROR:              return "NO_ERROR";
         case UNKNOWN_ERROR:         return "UNKNOWN_ERROR";
@@ -49,8 +50,6 @@
     }
 }
 
-namespace android {
-
 #define LITERAL_TO_STRING_INTERNAL(x)    #x
 #define LITERAL_TO_STRING(x) LITERAL_TO_STRING_INTERNAL(x)
 
diff --git a/include/media/stagefright/foundation/ALookup.h b/include/media/stagefright/foundation/ALookup.h
new file mode 100644
index 0000000..5a68806
--- /dev/null
+++ b/include/media/stagefright/foundation/ALookup.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_LOOKUP_H_
+
+#define A_LOOKUP_H_
+
+#include <utility>
+#include <vector>
+
+namespace android {
+
+template<typename T, typename U>
+struct ALookup {
+    ALookup(std::initializer_list<std::pair<T, U>> list);
+
+    bool lookup(const T& from, U *to) const;
+    bool rlookup(const U& from, T *to) const;
+
+    template<typename V, typename = typename std::enable_if<!std::is_same<T, V>::value>::type>
+    inline bool map(const T& from, V *to) const { return lookup(from, to); }
+
+    template<typename V, typename = typename std::enable_if<!std::is_same<T, V>::value>::type>
+    inline bool map(const V& from, T *to) const { return rlookup(from, to); }
+
+private:
+    std::vector<std::pair<T, U>> mTable;
+};
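+
+// A minimal usage sketch (illustrative; assumes AString is available):
+//
+//   static const ALookup<int32_t, AString> sNames {
+//       { 1, "one" },
+//       { 2, "two" },
+//   };
+//
+//   AString name;
+//   if (sNames.lookup(2, &name)) { /* name == "two" */ }
+//
+//   int32_t value;
+//   if (sNames.rlookup(AString("one"), &value)) { /* value == 1 */ }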
+
+template<typename T, typename U>
+ALookup<T, U>::ALookup(std::initializer_list<std::pair<T, U>> list)
+    : mTable(list) {
+}
+
+template<typename T, typename U>
+bool ALookup<T, U>::lookup(const T& from, U *to) const {
+    for (auto elem : mTable) {
+        if (elem.first == from) {
+            *to = elem.second;
+            return true;
+        }
+    }
+    return false;
+}
+
+template<typename T, typename U>
+bool ALookup<T, U>::rlookup(const U& from, T *to) const {
+    for (auto elem : mTable) {
+        if (elem.second == from) {
+            *to = elem.first;
+            return true;
+        }
+    }
+    return false;
+}
+
+} // namespace android
+
+#endif  // A_LOOKUP_H_
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 83b9444..87c32a6 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -62,7 +62,29 @@
     AMessage();
     AMessage(uint32_t what, const sp<const AHandler> &handler);
 
-    static sp<AMessage> FromParcel(const Parcel &parcel);
+    // Construct an AMessage from a parcel.
+    // maxNestingLevel determines how many levels an AMessage can be nested inside
+    // another AMessage. The default value here is arbitrarily set to 255.
+    // FromParcel() returns NULL on error, which occurs when the input parcel
+    // contains
+    // - an AMessage nested deeper than maxNestingLevel; or
+    // - an item whose type is not recognized by this function.
+    // Types currently recognized by this function are:
+    //   Item types      set/find function suffixes
+    //   ==========================================
+    //     int32_t                Int32
+    //     int64_t                Int64
+    //     size_t                 Size
+    //     float                  Float
+    //     double                 Double
+    //     AString                String
+    //     AMessage               Message
+    static sp<AMessage> FromParcel(const Parcel &parcel,
+                                   size_t maxNestingLevel = 255);
+
+    // Write this AMessage to a parcel.
+    // All items in the AMessage must have types that are recognized by
+    // FromParcel(); otherwise, TRESPASS error will occur.
     void writeToParcel(Parcel *parcel) const;
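+
+    // A round-trip sketch (illustrative):
+    //   sp<AMessage> msg = new AMessage;
+    //   msg->setInt32("flags", 1);
+    //   msg->setString("name", "demo");
+    //   Parcel parcel;
+    //   msg->writeToParcel(&parcel);
+    //   parcel.setDataPosition(0);
+    //   sp<AMessage> copy = AMessage::FromParcel(parcel);  // NULL on error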
 
     void setWhat(uint32_t what);
@@ -127,6 +149,15 @@
     // their refcount incremented.
     sp<AMessage> dup() const;
 
+    // Performs a shallow or deep comparison of |this| and |other| and returns
+    // an AMessage with the differences.
+    // Warning: RefBase items, i.e. "objects", are _not_ copied but only have
+    // their refcount incremented.
+    // This is true for AMessage values that have no corresponding entry in |other|
+    // (e.g. there is no such key, or the type is different). On the other hand,
+    // changes in the AMessage (or in nested AMessages, if |deep| is false) are
+    // returned in new objects.
+    sp<AMessage> changesFrom(const sp<const AMessage> &other, bool deep = false) const;
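+    //
+    // For example (illustrative): if |this| contains {a: 1, b: 2} and |other|
+    // contains {a: 1, b: 3}, the returned message contains {b: 2}.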
+
     AString debugString(int32_t indent = 0) const;
 
     enum Type {
diff --git a/include/media/stagefright/foundation/AUtils.h b/include/media/stagefright/foundation/AUtils.h
index 47444c1..255a0f4 100644
--- a/include/media/stagefright/foundation/AUtils.h
+++ b/include/media/stagefright/foundation/AUtils.h
@@ -68,6 +68,7 @@
 
 // needle is in range [hayStart, hayStart + haySize)
 template<class T, class U>
+__attribute__((no_sanitize("integer")))
 inline static bool isInRange(const T &hayStart, const U &haySize, const T &needle) {
     ENSURE_UNSIGNED_TYPE<U>();
     return (T)(hayStart + haySize) >= hayStart && needle >= hayStart && (U)(needle - hayStart) < haySize;
@@ -75,6 +76,7 @@
 
 // [needleStart, needleStart + needleSize) is in range [hayStart, hayStart + haySize)
 template<class T, class U>
+__attribute__((no_sanitize("integer")))
 inline static bool isInRange(
         const T &hayStart, const U &haySize, const T &needleStart, const U &needleSize) {
     ENSURE_UNSIGNED_TYPE<U>();
diff --git a/include/media/stagefright/foundation/ColorUtils.h b/include/media/stagefright/foundation/ColorUtils.h
new file mode 100644
index 0000000..2368b82
--- /dev/null
+++ b/include/media/stagefright/foundation/ColorUtils.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COLOR_UTILS_H_
+
+#define COLOR_UTILS_H_
+
+#include <stdint.h>
+
+#define STRINGIFY_ENUMS
+
+#include <media/stagefright/foundation/AMessage.h>
+
+#include <media/hardware/VideoAPI.h>
+#include <system/graphics.h>
+
+namespace android {
+
+struct ColorUtils {
+    /*
+     * Media-platform color constants. MediaCodec uses (an extended version of) platform-defined
+     * constants that are derived from HAL_DATASPACE, since these are directly exposed to the user.
+     * We extend the values to maintain the richer set of information defined inside media
+     * containers and bitstreams that are not supported by the platform. We also expect vendors
+     * to extend some of these values with vendor-specific values. These are separated into a
+     * vendor-extension section so they won't collide with future platform values.
+     */
+
+#define GET_HAL_ENUM(class, name) HAL_DATASPACE_##class##name
+#define GET_HAL_BITFIELD(class, name) (GET_HAL_ENUM(class, _##name) >> GET_HAL_ENUM(class, _SHIFT))
+
+    enum ColorStandard : uint32_t {
+        kColorStandardUnspecified =          GET_HAL_BITFIELD(STANDARD, UNSPECIFIED),
+        kColorStandardBT709 =                GET_HAL_BITFIELD(STANDARD, BT709),
+        kColorStandardBT601_625 =            GET_HAL_BITFIELD(STANDARD, BT601_625),
+        kColorStandardBT601_625_Unadjusted = GET_HAL_BITFIELD(STANDARD, BT601_625_UNADJUSTED),
+        kColorStandardBT601_525 =            GET_HAL_BITFIELD(STANDARD, BT601_525),
+        kColorStandardBT601_525_Unadjusted = GET_HAL_BITFIELD(STANDARD, BT601_525_UNADJUSTED),
+        kColorStandardBT2020 =               GET_HAL_BITFIELD(STANDARD, BT2020),
+        kColorStandardBT2020Constant =       GET_HAL_BITFIELD(STANDARD, BT2020_CONSTANT_LUMINANCE),
+        kColorStandardBT470M =               GET_HAL_BITFIELD(STANDARD, BT470M),
+        kColorStandardFilm =                 GET_HAL_BITFIELD(STANDARD, FILM),
+        kColorStandardMax =                  GET_HAL_BITFIELD(STANDARD, MASK),
+
+        /* This marks a section of color-standard values that are not supported by graphics HAL,
+           but track defined color primaries-matrix coefficient combinations in media.
+           These are stable for a given release. */
+        kColorStandardExtendedStart = kColorStandardMax + 1,
+
+        /* This marks a section of color-standard values that are not supported by graphics HAL
+           nor using media defined color primaries or matrix coefficients. These may differ per
+           device. */
+        kColorStandardVendorStart = 0x10000,
+    };
+
+    enum ColorTransfer : uint32_t  {
+        kColorTransferUnspecified = GET_HAL_BITFIELD(TRANSFER, UNSPECIFIED),
+        kColorTransferLinear =      GET_HAL_BITFIELD(TRANSFER, LINEAR),
+        kColorTransferSRGB =        GET_HAL_BITFIELD(TRANSFER, SRGB),
+        kColorTransferSMPTE_170M =  GET_HAL_BITFIELD(TRANSFER, SMPTE_170M),
+        kColorTransferGamma22 =     GET_HAL_BITFIELD(TRANSFER, GAMMA2_2),
+        kColorTransferGamma28 =     GET_HAL_BITFIELD(TRANSFER, GAMMA2_8),
+        kColorTransferST2084 =      GET_HAL_BITFIELD(TRANSFER, ST2084),
+        kColorTransferHLG =         GET_HAL_BITFIELD(TRANSFER, HLG),
+        kColorTransferMax =         GET_HAL_BITFIELD(TRANSFER, MASK),
+
+        /* This marks a section of color-transfer values that are not supported by graphics HAL,
+           but track media-defined color-transfer. These are stable for a given release. */
+        kColorTransferExtendedStart = kColorTransferMax + 1,
+
+        /* This marks a section of color-transfer values that are not supported by graphics HAL
+           nor defined by media. These may differ per device. */
+        kColorTransferVendorStart = 0x10000,
+    };
+
+    enum ColorRange : uint32_t  {
+        kColorRangeUnspecified = GET_HAL_BITFIELD(RANGE, UNSPECIFIED),
+        kColorRangeFull =        GET_HAL_BITFIELD(RANGE, FULL),
+        kColorRangeLimited =     GET_HAL_BITFIELD(RANGE, LIMITED),
+        kColorRangeMax =         GET_HAL_BITFIELD(RANGE, MASK),
+
+        /* This marks a section of color-range values that are not supported by graphics HAL,
+           but track media-defined color-range. These are stable for a given release. */
+        kColorRangeExtendedStart = kColorRangeMax + 1,
+
+        /* This marks a section of color-range values that are not supported by graphics HAL
+           nor defined by media. These may differ per device. */
+        kColorRangeVendorStart = 0x10000,
+    };
+
+#undef GET_HAL_BITFIELD
+#undef GET_HAL_ENUM
+
+    /*
+     * Static utilities for codec support
+     */
+
+    // using int32_t for media range/standard/transfers to denote extended ranges
+    // wrap methods change invalid aspects to the Unspecified value
+    static int32_t wrapColorAspectsIntoColorStandard(
+            ColorAspects::Primaries primaries, ColorAspects::MatrixCoeffs coeffs);
+    static int32_t wrapColorAspectsIntoColorRange(ColorAspects::Range range);
+    static int32_t wrapColorAspectsIntoColorTransfer(ColorAspects::Transfer transfer);
+
+    // unwrap methods change invalid aspects to the Other value
+    static status_t unwrapColorAspectsFromColorRange(
+            int32_t range, ColorAspects::Range *aspect);
+    static status_t unwrapColorAspectsFromColorTransfer(
+            int32_t transfer, ColorAspects::Transfer *aspect);
+    static status_t unwrapColorAspectsFromColorStandard(
+            int32_t standard,
+            ColorAspects::Primaries *primaries, ColorAspects::MatrixCoeffs *coeffs);
+
+    static status_t convertPlatformColorAspectsToCodecAspects(
+            int32_t range, int32_t standard, int32_t transfer, ColorAspects &aspects);
+    static status_t convertCodecColorAspectsToPlatformAspects(
+            const ColorAspects &aspects, int32_t *range, int32_t *standard, int32_t *transfer);
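+
+    // A round-trip sketch (illustrative; the "color-*" format keys are assumptions):
+    //   ColorAspects aspects;  // e.g. as reported by a codec
+    //   int32_t range, standard, transfer;
+    //   convertCodecColorAspectsToPlatformAspects(aspects, &range, &standard, &transfer);
+    //   // ... ship these through format keys such as "color-range" ...
+    //   convertPlatformColorAspectsToCodecAspects(range, standard, transfer, aspects);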
+
+    // converts Other values to Unspecified
+    static void convertCodecColorAspectsToIsoAspects(
+            const ColorAspects &aspects,
+            int32_t *primaries, int32_t *transfer, int32_t *coeffs, bool *fullRange);
+    // converts unsupported values to Other
+    static void convertIsoColorAspectsToCodecAspects(
+            int32_t primaries, int32_t transfer, int32_t coeffs, bool fullRange,
+            ColorAspects &aspects);
+
+    // updates Unspecified color aspects to their defaults based on the video size
+    static void setDefaultCodecColorAspectsIfNeeded(
+            ColorAspects &aspects, int32_t width, int32_t height);
+
+    // it returns the closest dataSpace for the given color |aspects|. If |mayExpand| is true, it
+    // allows returning a larger dataSpace that contains the color space given by |aspects|, and is
+    // better suited to blending. This requires implicit color space conversion on the part of the
+    // device.
+    static android_dataspace getDataSpaceForColorAspects(ColorAspects &aspects, bool mayExpand);
+
+    // converts |dataSpace| to a V0 enum, and returns true if dataSpace is an aspect-only value
+    static bool convertDataSpaceToV0(android_dataspace &dataSpace);
+
+    // compares |aspects| to |orig|. Returns |true| if any aspects have changed, except if they
+    // changed to the Unspecified value. It also sets the changed values to Unspecified in |aspects|.
+    static bool checkIfAspectsChangedAndUnspecifyThem(
+            ColorAspects &aspects, const ColorAspects &orig, bool usePlatformAspects = false);
+
+    // finds color config in format, defaulting them to 0.
+    static void getColorConfigFromFormat(
+            const sp<AMessage> &format, int *range, int *standard, int *transfer);
+
+    // copies existing color config from |source| to |target|.
+    static void copyColorConfig(const sp<AMessage> &source, sp<AMessage> &target);
+
+    // finds color config in format as ColorAspects, defaulting them to 0.
+    static void getColorAspectsFromFormat(const sp<AMessage> &format, ColorAspects &aspects);
+
+    // writes |aspects| into format. If |force| is false, Unspecified values are not
+    // written.
+    static void setColorAspectsIntoFormat(
+            const ColorAspects &aspects, sp<AMessage> &format, bool force = false);
+
+    // finds HDR metadata in format as HDRStaticInfo, defaulting the fields to 0.
+    // Returns |true| if HDR metadata could be found in the format; otherwise, returns |false|.
+    static bool getHDRStaticInfoFromFormat(const sp<AMessage> &format, HDRStaticInfo *info);
+
+    // writes |info| into format.
+    static void setHDRStaticInfoIntoFormat(const HDRStaticInfo &info, sp<AMessage> &format);
+};
+
+inline static const char *asString(android::ColorUtils::ColorStandard i, const char *def = "??") {
+    using namespace android;
+    switch (i) {
+        case ColorUtils::kColorStandardUnspecified:          return "Unspecified";
+        case ColorUtils::kColorStandardBT709:                return "BT709";
+        case ColorUtils::kColorStandardBT601_625:            return "BT601_625";
+        case ColorUtils::kColorStandardBT601_625_Unadjusted: return "BT601_625_Unadjusted";
+        case ColorUtils::kColorStandardBT601_525:            return "BT601_525";
+        case ColorUtils::kColorStandardBT601_525_Unadjusted: return "BT601_525_Unadjusted";
+        case ColorUtils::kColorStandardBT2020:               return "BT2020";
+        case ColorUtils::kColorStandardBT2020Constant:       return "BT2020Constant";
+        case ColorUtils::kColorStandardBT470M:               return "BT470M";
+        case ColorUtils::kColorStandardFilm:                 return "Film";
+        default:                                            return def;
+    }
+}
+
+inline static const char *asString(android::ColorUtils::ColorTransfer i, const char *def = "??") {
+    using namespace android;
+    switch (i) {
+        case ColorUtils::kColorTransferUnspecified: return "Unspecified";
+        case ColorUtils::kColorTransferLinear:      return "Linear";
+        case ColorUtils::kColorTransferSRGB:        return "SRGB";
+        case ColorUtils::kColorTransferSMPTE_170M:  return "SMPTE_170M";
+        case ColorUtils::kColorTransferGamma22:     return "Gamma22";
+        case ColorUtils::kColorTransferGamma28:     return "Gamma28";
+        case ColorUtils::kColorTransferST2084:      return "ST2084";
+        case ColorUtils::kColorTransferHLG:         return "HLG";
+        default:                                   return def;
+    }
+}
+
+inline static const char *asString(android::ColorUtils::ColorRange i, const char *def = "??") {
+    using namespace android;
+    switch (i) {
+        case ColorUtils::kColorRangeUnspecified: return "Unspecified";
+        case ColorUtils::kColorRangeFull:        return "Full";
+        case ColorUtils::kColorRangeLimited:     return "Limited";
+        default:                                return def;
+    }
+}
+
+}  // namespace android
+
+#endif  // COLOR_UTILS_H_
+
diff --git a/include/media/stagefright/foundation/Mutexed.h b/include/media/stagefright/foundation/Mutexed.h
new file mode 100644
index 0000000..143b140
--- /dev/null
+++ b/include/media/stagefright/foundation/Mutexed.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_MUTEXED_H_
+#define STAGEFRIGHT_FOUNDATION_MUTEXED_H_
+
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+
+namespace android {
+
+/*
+ * Wrapper class to programmatically protect a structure using a mutex.
+ *
+ * Mutexed<> objects contain a built-in mutex. Protection is enforced because the structure can
+ * only be accessed by locking the mutex first.
+ *
+ * Usage:
+ *
+ * struct DataToProtect {
+ *   DataToProtect(int var1) : mVar1(var1), mVar2(0) { }
+ *   int mVar1;
+ *   int mVar2;
+ *   Condition mCondition1;
+ * };
+ *
+ * Mutexed<DataToProtect> mProtectedData;
+ *
+ * // members are inaccessible via mProtectedData directly
+ *
+ * void someFunction() {
+ *   Mutexed<DataToProtect>::Locked data(mProtectedData); // access the protected data
+ *
+ *   // the mutex is locked here, so accessing the data is safe
+ *
+ *   if (data->mVar1 < 5) {
+ *     ++data->mVar2;
+ *   }
+ *
+ *   // if you need to temporarily unlock the mutex, you can unlock and relock it
+ *   // locally using the accessor object.
+ *
+ *   data.unlock();
+ *
+ *   // data is inaccessible here
+ *
+ *   doSomeLongOperation();
+ *
+ *   data.lock();
+ *
+ *   // data is now accessible again. Note: it may have changed since unlock().
+ *
+ *   // you can use the integral mutex to wait for a condition
+ *
+ *   data.waitForCondition(data->mCondition1);
+ *
+ *   helper(data);
+ * }
+ *
+ * void trigger() {
+ *   Mutexed<DataToProtect>::Locked data(mProtectedData);
+ *   data->mCondition1.signal();
+ * }
+ *
+ * void helper(const Mutexed<DataToProtect>::Locked &data) {
+ *   data->mVar1 = 3;
+ * }
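+ *
+ * // the lock() shorthand creates the accessor-guard without naming its type:
+ * void shorthand() {
+ *   auto data = mProtectedData.lock();
+ *   data->mVar2 = 7;
+ * }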
+ *
+ */
+
+template<typename T>
+class Mutexed {
+public:
+    /*
+     * Accessor-guard of the mutex-protected structure. This can be dereferenced to
+     * access the structure (using -> or * operators).
+     *
+     * Upon creation, the mutex is locked. You can use lock()/unlock() methods to
+     * temporarily lock/unlock the mutex. Using any references to the underlying
+     * structure or its members defeats the protection of this class, so don't do
+     * it.
+     *
+     * Note: The accessor-guard itself is not thread-safe. E.g. you should not call
+     * unlock() or lock() from different threads; they must be called from the thread
+     * that locked the original wrapper.
+     *
+     * Also note: Recursive locking/unlocking is not supported by the accessor. This
+     * is as intended, as it allows lenient locking/unlocking via multiple code paths.
+     */
+    class Locked {
+    public:
+        inline Locked(Mutexed<T> &parent);
+        inline Locked(Locked &&from) :
+            mLock(from.mLock),
+            mTreasure(from.mTreasure),
+            mLocked(from.mLocked) { from.mLocked = false; }  // the moved-from guard must not unlock
+        inline ~Locked();
+
+        // dereference the protected structure. This returns nullptr if the
+        // mutex is not locked by this accessor-guard.
+        inline T* operator->() const { return mLocked ? &mTreasure : nullptr; }
+        inline T& operator*()  const { return mLocked ?  mTreasure : *(T*)nullptr; }
+
+        // same as *
+        inline T& get() const { return mLocked ?  mTreasure : *(T*)nullptr; }
+        // sets the wrapped structure. This will abort if mLocked is false.
+        inline void set(T& o) const { get() = o; }
+
+        // Wait on the condition variable using lock. Must be locked.
+        inline status_t waitForCondition(Condition &cond) { return cond.wait(mLock); }
+
+        // same with relative timeout
+        inline status_t waitForConditionRelative(Condition &cond, nsecs_t reltime) {
+            return cond.waitRelative(mLock, reltime);
+        }
+
+        // unlocks the integral mutex. No-op if the mutex was already unlocked.
+        inline void unlock();
+
+        // locks the integral mutex. No-op if the mutex was already locked.
+        inline void lock();
+
+    private:
+        Mutex &mLock;
+        T &mTreasure;
+        bool mLocked;
+
+        // disable copy constructors
+        Locked(const Locked&) = delete;
+        void operator=(const Locked&) = delete;
+    };
+
+    // Wrap all constructors of the underlying structure
+    template<typename ...Args>
+    Mutexed(Args... args) : mTreasure(args...) { }
+
+    ~Mutexed() { }
+
+    // Lock the mutex, and create an accessor-guard (a Locked object) to access the underlying
+    // structure. This returns an object that dereferences to the wrapped structure when the mutex
+    // is locked by it, or otherwise to "null".
+    // This is just a shorthand for Locked() constructor to avoid specifying the template type.
+    inline Locked lock() {
+        return Locked(*this);
+    }
+
+private:
+    friend class Locked;
+    Mutex mLock;
+    T mTreasure;
+
+    // disable copy constructors
+    Mutexed(const Mutexed<T>&) = delete;
+    void operator=(const Mutexed<T>&) = delete;
+};
+
+template<typename T>
+inline Mutexed<T>::Locked::Locked(Mutexed<T> &parent)
+    : mLock(parent.mLock),
+      mTreasure(parent.mTreasure),
+      mLocked(true) {
+    mLock.lock();
+}
+
+template<typename T>
+inline Mutexed<T>::Locked::~Locked() {
+    if (mLocked) {
+        mLock.unlock();
+    }
+}
+
+template<typename T>
+inline void Mutexed<T>::Locked::unlock() {
+    if (mLocked) {
+        mLocked = false;
+        mLock.unlock();
+    }
+}
+
+template<typename T>
+inline void Mutexed<T>::Locked::lock() {
+    if (!mLocked) {
+        mLock.lock();
+        mLocked = true;
+    }
+}
+
+} // namespace android
+
+#endif
diff --git a/include/media/stagefright/timedtext/TimedTextDriver.h b/include/media/stagefright/timedtext/TimedTextDriver.h
deleted file mode 100644
index 6f7c693..0000000
--- a/include/media/stagefright/timedtext/TimedTextDriver.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TIMED_TEXT_DRIVER_H_
-#define TIMED_TEXT_DRIVER_H_
-
-#include <media/stagefright/foundation/ABase.h> // for DISALLOW_* macro
-#include <utils/Errors.h> // for status_t
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-
-namespace android {
-
-struct ALooper;
-struct IMediaHTTPService;
-class MediaPlayerBase;
-class MediaSource;
-class Parcel;
-class TimedTextPlayer;
-class TimedTextSource;
-class DataSource;
-
-class TimedTextDriver {
-public:
-    TimedTextDriver(
-            const wp<MediaPlayerBase> &listener,
-            const sp<IMediaHTTPService> &httpService);
-
-    ~TimedTextDriver();
-
-    status_t start();
-    status_t pause();
-    status_t selectTrack(size_t index);
-    status_t unselectTrack(size_t index);
-
-    status_t seekToAsync(int64_t timeUs);
-
-    status_t addInBandTextSource(
-            size_t trackIndex, const sp<MediaSource>& source);
-
-    status_t addOutOfBandTextSource(
-            size_t trackIndex, const char *uri, const char *mimeType);
-
-    // Caller owns the file desriptor and caller is responsible for closing it.
-    status_t addOutOfBandTextSource(
-            size_t trackIndex, int fd, off64_t offset,
-            off64_t length, const char *mimeType);
-
-    void getExternalTrackInfo(Parcel *parcel);
-    size_t countExternalTracks() const;
-
-private:
-    Mutex mLock;
-
-    enum State {
-        UNINITIALIZED,
-        PREPARED,
-        PLAYING,
-        PAUSED,
-    };
-
-    enum TextSourceType {
-        TEXT_SOURCE_TYPE_IN_BAND = 0,
-        TEXT_SOURCE_TYPE_OUT_OF_BAND,
-    };
-
-    sp<ALooper> mLooper;
-    sp<TimedTextPlayer> mPlayer;
-    wp<MediaPlayerBase> mListener;
-    sp<IMediaHTTPService> mHTTPService;
-
-    // Variables to be guarded by mLock.
-    State mState;
-    size_t mCurrentTrackIndex;
-    KeyedVector<size_t, sp<TimedTextSource> > mTextSourceVector;
-    Vector<TextSourceType> mTextSourceTypeVector;
-
-    // -- End of variables to be guarded by mLock
-
-    status_t selectTrack_l(size_t index);
-
-    status_t createOutOfBandTextSource(
-            size_t trackIndex, const char* mimeType,
-            const sp<DataSource>& dataSource);
-
-    DISALLOW_EVIL_CONSTRUCTORS(TimedTextDriver);
-};
-
-}  // namespace android
-
-#endif  // TIMED_TEXT_DRIVER_H_
diff --git a/include/ndk/NdkImage.h b/include/ndk/NdkImage.h
new file mode 100644
index 0000000..cd0b11e
--- /dev/null
+++ b/include/ndk/NdkImage.h
@@ -0,0 +1,613 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Media Camera
+ * @{
+ */
+
+/**
+ * @file NdkImage.h
+ */
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+
+#ifndef _NDK_IMAGE_H
+#define _NDK_IMAGE_H
+
+#include "NdkMediaError.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * AImage is an opaque type that provides access to an image generated by {@link AImageReader}.
+ */
+typedef struct AImage AImage;
+
+// Formats not listed here will not be supported by AImageReader
+enum AIMAGE_FORMATS {
+    /**
+     * Multi-plane Android YUV 420 format.
+     *
+     * <p>This format is a generic YCbCr format, capable of describing any 4:2:0
+     * chroma-subsampled planar or semiplanar buffer (but not fully interleaved),
+     * with 8 bits per color sample.</p>
+     *
+     * <p>Images in this format are always represented by three separate buffers
+     * of data, one for each color plane. Additional information always
+     * accompanies the buffers, describing the row stride and the pixel stride
+     * for each plane.</p>
+     *
+     * <p>The order of planes is guaranteed such that plane #0 is always Y, plane #1 is always
+     * U (Cb), and plane #2 is always V (Cr).</p>
+     *
+     * <p>The Y-plane is guaranteed not to be interleaved with the U/V planes
+     * (in particular, pixel stride is always 1 in {@link AImage_getPlanePixelStride}).</p>
+     *
+     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride, that is, the
+     * return value of {@link AImage_getPlaneRowStride} for the U/V plane are guaranteed to be the
+     * same, and the return value of {@link AImage_getPlanePixelStride} for the U/V plane are also
+     * guaranteed to be the same.</p>
+     *
+     * <p>For example, the {@link AImage} object can provide data
+     * in this format from a {@link ACameraDevice} through an {@link AImageReader} object.</p>
+     *
+     * <p>This format is always supported as an output format for the android Camera2 NDK API.</p>
+     *
+     * @see AImage
+     * @see AImageReader
+     * @see ACameraDevice
+     */
+    AIMAGE_FORMAT_YUV_420_888       = 0x23,
+
+    /**
+     * Compressed JPEG format.
+     *
+     * <p>This format is always supported as an output format for the android Camera2 NDK API.</p>
+     */
+    AIMAGE_FORMAT_JPEG              = 0x100,
+
+    /**
+     * 16 bits per pixel raw camera sensor image format, usually representing a single-channel
+     * Bayer-mosaic image.
+     *
+     * <p>The layout of the color mosaic, the maximum and minimum encoding
+     * values of the raw pixel data, the color space of the image, and all other
+     * needed information to interpret a raw sensor image must be queried from
+     * the {@link ACameraDevice} which produced the image.</p>
+     */
+    AIMAGE_FORMAT_RAW16             = 0x20,
+
+    /**
+     * Private raw camera sensor image format, a single-channel image with an implementation-dependent
+     * pixel layout.
+     *
+     * <p>AIMAGE_FORMAT_RAW_PRIVATE is a format for unprocessed raw image buffers coming from an
+     * image sensor. The actual structure of buffers of this format is implementation-dependent.</p>
+     *
+     */
+    AIMAGE_FORMAT_RAW_PRIVATE       = 0x24,
+
+    /**
+     * Android 10-bit raw format.
+     *
+     * <p>
+     * This is a single-plane, 10-bit per pixel, densely packed (in each row),
+     * unprocessed format, usually representing raw Bayer-pattern images coming
+     * from an image sensor.
+     * </p>
+     * <p>
+     * In an image buffer with this format, starting from the first pixel of
+     * each row, each 4 consecutive pixels are packed into 5 bytes (40 bits).
+     * Each one of the first 4 bytes contains the top 8 bits of one pixel. The
+     * fifth byte contains the 2 least significant bits of all 4 pixels; the
+     * exact layout of each 4 consecutive pixels is illustrated below
+     * (Pi[j] stands for the jth bit of the ith pixel):
+     * </p>
+     * <table>
+     * <tr>
+     * <th align="center"></th>
+     * <th align="center">bit 7</th>
+     * <th align="center">bit 6</th>
+     * <th align="center">bit 5</th>
+     * <th align="center">bit 4</th>
+     * <th align="center">bit 3</th>
+     * <th align="center">bit 2</th>
+     * <th align="center">bit 1</th>
+     * <th align="center">bit 0</th>
+     * </tr>
+     * <tr>
+     * <td align="center">Byte 0:</td>
+     * <td align="center">P0[9]</td>
+     * <td align="center">P0[8]</td>
+     * <td align="center">P0[7]</td>
+     * <td align="center">P0[6]</td>
+     * <td align="center">P0[5]</td>
+     * <td align="center">P0[4]</td>
+     * <td align="center">P0[3]</td>
+     * <td align="center">P0[2]</td>
+     * </tr>
+     * <tr>
+     * <td align="center">Byte 1:</td>
+     * <td align="center">P1[9]</td>
+     * <td align="center">P1[8]</td>
+     * <td align="center">P1[7]</td>
+     * <td align="center">P1[6]</td>
+     * <td align="center">P1[5]</td>
+     * <td align="center">P1[4]</td>
+     * <td align="center">P1[3]</td>
+     * <td align="center">P1[2]</td>
+     * </tr>
+     * <tr>
+     * <td align="center">Byte 2:</td>
+     * <td align="center">P2[9]</td>
+     * <td align="center">P2[8]</td>
+     * <td align="center">P2[7]</td>
+     * <td align="center">P2[6]</td>
+     * <td align="center">P2[5]</td>
+     * <td align="center">P2[4]</td>
+     * <td align="center">P2[3]</td>
+     * <td align="center">P2[2]</td>
+     * </tr>
+     * <tr>
+     * <td align="center">Byte 3:</td>
+     * <td align="center">P3[9]</td>
+     * <td align="center">P3[8]</td>
+     * <td align="center">P3[7]</td>
+     * <td align="center">P3[6]</td>
+     * <td align="center">P3[5]</td>
+     * <td align="center">P3[4]</td>
+     * <td align="center">P3[3]</td>
+     * <td align="center">P3[2]</td>
+     * </tr>
+     * <tr>
+     * <td align="center">Byte 4:</td>
+     * <td align="center">P3[1]</td>
+     * <td align="center">P3[0]</td>
+     * <td align="center">P2[1]</td>
+     * <td align="center">P2[0]</td>
+     * <td align="center">P1[1]</td>
+     * <td align="center">P1[0]</td>
+     * <td align="center">P0[1]</td>
+     * <td align="center">P0[0]</td>
+     * </tr>
+     * </table>
+     * <p>
+     * This format assumes
+     * <ul>
+     * <li>a width multiple of 4 pixels</li>
+     * <li>an even height</li>
+     * </ul>
+     * </p>
+     *
+     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
+     * not pixels.
+     *
+     * <p>
+     * Since this is a densely packed format, the pixel stride is always 0. The
+     * application must use the pixel data layout defined in above table to
+     * access each row's data. When the row stride is equal to (width * 10 / 8), there
+     * will be no padding bytes at the end of each row, and the entire image data is
+     * densely packed. When the stride is larger than (width * 10 / 8), padding
+     * bytes will be present at the end of each row.
+     * </p>
+     * <p>
+     * For example, the {@link AImage} object can provide data in this format from a
+     * {@link ACameraDevice} (if supported) through a {@link AImageReader} object.
+     * The number of planes returned by {@link AImage_getNumberOfPlanes} will always be 1.
+     * The pixel stride is undefined ({@link AImage_getPlanePixelStride} will return
+     * {@link AMEDIA_ERROR_UNSUPPORTED}), and {@link AImage_getPlaneRowStride} describes the
+     * vertical distance (in bytes) between adjacent rows.
+     * </p>
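+     *
+     * <p>A sketch of unpacking one 5-byte group of 4 pixels (derived from the
+     * table above; illustrative):
+     * <pre>
+     *    const uint8_t* p = ...;  // start of a 5-byte group within a row
+     *    uint16_t p0 = (p[0] << 2) | ( p[4]       & 0x3);
+     *    uint16_t p1 = (p[1] << 2) | ((p[4] >> 2) & 0x3);
+     *    uint16_t p2 = (p[2] << 2) | ((p[4] >> 4) & 0x3);
+     *    uint16_t p3 = (p[3] << 2) | ((p[4] >> 6) & 0x3);
+     * </pre></p>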
+     *
+     * @see AImage
+     * @see AImageReader
+     * @see ACameraDevice
+     */
+    AIMAGE_FORMAT_RAW10             = 0x25,
+
+    /**
+     * Android 12-bit raw format.
+     *
+     * <p>
+     * This is a single-plane, 12-bit per pixel, densely packed (in each row),
+     * unprocessed format, usually representing raw Bayer-pattern images coming
+     * from an image sensor.
+     * </p>
+     * <p>
+     * In an image buffer with this format, starting from the first pixel of each
+     * row, each two consecutive pixels are packed into 3 bytes (24 bits). The first
+     * and second byte contains the top 8 bits of first and second pixel. The third
+     * byte contains the 4 least significant bits of the two pixels, the exact layout
+     * data for each two consecutive pixels is illustrated below (Pi[j] stands for
+     * the jth bit of the ith pixel):
+     * </p>
+     * <table>
+     * <tr>
+     * <th align="center"></th>
+     * <th align="center">bit 7</th>
+     * <th align="center">bit 6</th>
+     * <th align="center">bit 5</th>
+     * <th align="center">bit 4</th>
+     * <th align="center">bit 3</th>
+     * <th align="center">bit 2</th>
+     * <th align="center">bit 1</th>
+     * <th align="center">bit 0</th>
+     * </tr>
+     * <tr>
+     * <td align="center">Byte 0:</td>
+     * <td align="center">P0[11]</td>
+     * <td align="center">P0[10]</td>
+     * <td align="center">P0[ 9]</td>
+     * <td align="center">P0[ 8]</td>
+     * <td align="center">P0[ 7]</td>
+     * <td align="center">P0[ 6]</td>
+     * <td align="center">P0[ 5]</td>
+     * <td align="center">P0[ 4]</td>
+     * </tr>
+     * <tr>
+     * <td align="center">Byte 1:</td>
+     * <td align="center">P1[11]</td>
+     * <td align="center">P1[10]</td>
+     * <td align="center">P1[ 9]</td>
+     * <td align="center">P1[ 8]</td>
+     * <td align="center">P1[ 7]</td>
+     * <td align="center">P1[ 6]</td>
+     * <td align="center">P1[ 5]</td>
+     * <td align="center">P1[ 4]</td>
+     * </tr>
+     * <tr>
+     * <td align="center">Byte 2:</td>
+     * <td align="center">P1[ 3]</td>
+     * <td align="center">P1[ 2]</td>
+     * <td align="center">P1[ 1]</td>
+     * <td align="center">P1[ 0]</td>
+     * <td align="center">P0[ 3]</td>
+     * <td align="center">P0[ 2]</td>
+     * <td align="center">P0[ 1]</td>
+     * <td align="center">P0[ 0]</td>
+     * </tr>
+     * </table>
+     * <p>
+     * This format assumes
+     * <ul>
+     * <li>a width multiple of 4 pixels</li>
+     * <li>an even height</li>
+     * </ul>
+     * </p>
+     *
+     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
+     * not pixels.
+     *
+     * <p>
+     * Since this is a densely packed format, the pixel stride is always 0. The
+     * application must use the pixel data layout defined in above table to
+     * access each row's data. When the row stride is equal to (width * 12 / 8), there
+     * will be no padding bytes at the end of each row, and the entire image data is
+     * densely packed. When the stride is larger than (width * 12 / 8), padding
+     * bytes will be present at the end of each row.
+     * </p>
+     * <p>
+     * For example, the {@link AImage} object can provide data in this format from a
+     * {@link ACameraDevice} (if supported) through a {@link AImageReader} object.
+     * The number of planes returned by {@link AImage_getNumberOfPlanes} will always be 1.
+     * The pixel stride is undefined ({@link AImage_getPlanePixelStride} will return
+     * {@link AMEDIA_ERROR_UNSUPPORTED}), and {@link AImage_getPlaneRowStride} describes the
+     * vertical distance (in bytes) between adjacent rows.
+     * </p>
+     *
+     * @see AImage
+     * @see AImageReader
+     * @see ACameraDevice
+     */
+    AIMAGE_FORMAT_RAW12             = 0x26,
+
+    /**
+     * Android dense depth image format.
+     *
+     * <p>Each pixel is 16 bits, representing a depth ranging measurement from a depth camera or
+     * similar sensor. The 16-bit sample consists of a confidence value and the actual ranging
+     * measurement.</p>
+     *
+     * <p>The confidence value is an estimate of correctness for this sample.  It is encoded in the
+     * 3 most significant bits of the sample, with a value of 0 representing 100% confidence, a
+     * value of 1 representing 0% confidence, a value of 2 representing 1/7, a value of 3
+     * representing 2/7, and so on.</p>
+     *
+     * <p>As an example, the following sample extracts the range and confidence from the first pixel
+     * of a DEPTH16-format {@link AImage}, and converts the confidence to a floating-point value
+     * between 0 and 1.f inclusive, with 1.f representing maximum confidence:
+     *
+     * <pre>
+     *    uint16_t* data;
+     *    int dataLength;
+     *    AImage_getPlaneData(image, 0, (uint8_t**)&data, &dataLength);
+     *    uint16_t depthSample = data[0];
+     *    uint16_t depthRange = (depthSample & 0x1FFF);
+     *    uint16_t depthConfidence = ((depthSample >> 13) & 0x7);
+     *    float depthPercentage = depthConfidence == 0 ? 1.f : (depthConfidence - 1) / 7.f;
+     * </pre>
+     * </p>
+     *
+     * <p>This format assumes
+     * <ul>
+     * <li>an even width</li>
+     * <li>an even height</li>
+     * <li>a horizontal stride multiple of 16 pixels</li>
+     * </ul>
+     * </p>
+     *
+     * <pre> y_size = stride * height </pre>
+     *
+     * When produced by a camera, the units for the range are millimeters.
+     */
+    AIMAGE_FORMAT_DEPTH16           = 0x44363159,
+
+    /**
+     * Android sparse depth point cloud format.
+     *
+     * <p>A variable-length list of 3D points plus a confidence value, with each point represented
+     * by four floats; first the X, Y, Z position coordinates, and then the confidence value.</p>
+     *
+     * <p>The number of points is ((size of the buffer in bytes) / 16).
+     *
+     * <p>The coordinate system and units of the position values depend on the source of the point
+     * cloud data. The confidence value is between 0.f and 1.f, inclusive, with 0 representing 0%
+     * confidence and 1.f representing 100% confidence in the measured position values.</p>
+     *
+     * <p>As an example, the following code extracts the first depth point in a DEPTH_POINT_CLOUD
+     * format {@link AImage}:
+     * <pre>
+     *    float* data;
+     *    int dataLength;
+     *    AImage_getPlaneData(image, 0, (uint8_t**)&data, &dataLength);
+     *    float x = data[0];
+     *    float y = data[1];
+     *    float z = data[2];
+     *    float confidence = data[3];
+     * </pre>
+     *
+     */
+    AIMAGE_FORMAT_DEPTH_POINT_CLOUD = 0x101,
+
+    /**
+     * Android private opaque image format.
+     *
+     * <p>This format is not currently supported by {@link AImageReader}.</p>
+     */
+    AIMAGE_FORMAT_PRIVATE           = 0x22
+};
+
+/**
+ * Data type describing a cropped rectangle returned by {@link AImage_getCropRect}.
+ *
+ * <p>Note that the right and bottom coordinates are exclusive, so the width of the rectangle is
+ * (right - left) and the height of the rectangle is (bottom - top).</p>
+ */
+typedef struct AImageCropRect {
+    int32_t left;
+    int32_t top;
+    int32_t right;
+    int32_t bottom;
+} AImageCropRect;
+
+/**
+ * Return the image back to the system and delete the AImage object from memory.
+ *
+ * <p>Do NOT use the image pointer after this method returns.
+ * Note that if the parent {@link AImageReader} is closed, all the {@link AImage} objects acquired
+ * from the parent reader will be returned to the system. All AImage_* methods except this method
+ * will return {@link AMEDIA_ERROR_INVALID_OBJECT}. The application still needs to call this method
+ * on those {@link AImage} objects to fully delete them from memory.</p>
+ *
+ * @param image The {@link AImage} to be deleted.
+ */
+void AImage_delete(AImage* image);
+
+/**
+ * Query the width of the input {@link AImage}.
+ *
+ * @param image the {@link AImage} of interest.
+ * @param width the width of the image will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or width is NULL.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
+ *                 image has been deleted.</li></ul>
+ */
+media_status_t AImage_getWidth(const AImage* image, /*out*/int32_t* width);
+
+/**
+ * Query the height of the input {@link AImage}.
+ *
+ * @param image the {@link AImage} of interest.
+ * @param height the height of the image will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or height is NULL.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
+ *                 image has been deleted.</li></ul>
+ */
+media_status_t AImage_getHeight(const AImage* image, /*out*/int32_t* height);
+
+/**
+ * Query the format of the input {@link AImage}.
+ *
+ * <p>The format value will be one of AIMAGE_FORMAT_* enum value.</p>
+ *
+ * @param image the {@link AImage} of interest.
+ * @param format the format of the image will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or format is NULL.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
+ *                 image has been deleted.</li></ul>
+ */
+media_status_t AImage_getFormat(const AImage* image, /*out*/int32_t* format);
+
+/**
+ * Query the cropped rectangle of the input {@link AImage}.
+ *
+ * <p>The crop rectangle specifies the region of valid pixels in the image, using coordinates in the
+ * largest-resolution plane.</p>
+ *
+ * @param image the {@link AImage} of interest.
+ * @param rect the cropped rectangle of the image will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or rect is NULL.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
+ *                 image has been deleted.</li></ul>
+ */
+media_status_t AImage_getCropRect(const AImage* image, /*out*/AImageCropRect* rect);
+
+/**
+ * Query the timestamp of the input {@link AImage}.
+ *
+ * <p>
+ * The timestamp is measured in nanoseconds, and is normally monotonically increasing. The
+ * timestamps for images from different sources may have different timebases and therefore may not
+ * be comparable. The specific meaning and timebase of the timestamp depend on the source providing
+ * images. For images generated by a camera, the timestamp value will match
+ * {@link ACAMERA_SENSOR_TIMESTAMP} of the {@link ACameraMetadata} in
+ * {@link ACameraCaptureSession_captureCallbacks#onCaptureStarted} and
+ * {@link ACameraCaptureSession_captureCallbacks#onCaptureCompleted} callback.
+ * </p>
+ *
+ * @param image the {@link AImage} of interest.
+ * @param timestampNs the timestamp of the image will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or timestampNs is NULL.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
+ *                 image has been deleted.</li></ul>
+ */
+media_status_t AImage_getTimestamp(const AImage* image, /*out*/int64_t* timestampNs);
+
+/**
+ * Query the number of planes of the input {@link AImage}.
+ *
+ * <p>The number of planes of an {@link AImage} is determined by its format, which can be queried
+ * with the {@link AImage_getFormat} method.</p>
+ *
+ * @param image the {@link AImage} of interest.
+ * @param numPlanes the number of planes of the image will be filled here if the method call
+ *         succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or numPlanes is NULL.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
+ *                 image has been deleted.</li></ul>
+ */
+media_status_t AImage_getNumberOfPlanes(const AImage* image, /*out*/int32_t* numPlanes);
+
+/**
+ * Query the pixel stride of the input {@link AImage}.
+ *
+ * <p>This is the distance between two consecutive pixel values in a row of pixels. It may be
+ * larger than the size of a single pixel to account for interleaved image data or padded formats.
+ * Note that pixel stride is undefined for some formats such as {@link AIMAGE_FORMAT_RAW_PRIVATE},
+ * and calling this method on images of those formats will cause {@link AMEDIA_ERROR_UNSUPPORTED}
+ * to be returned.
+ * For formats where pixel stride is well defined, the pixel stride is always greater than 0.</p>
+ *
+ * @param image the {@link AImage} of interest.
+ * @param planeIdx the index of the plane. Must be less than the number of planes of input image.
+ * @param pixelStride the pixel stride of the image will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or pixelStride is NULL, or planeIdx
+ *                 is out of the range of [0, numOfPlanes - 1].</li>
+ *         <li>{@link AMEDIA_ERROR_UNSUPPORTED} if pixel stride is undefined for the format of input
+ *                 image.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
+ *                 image has been deleted.</li></ul>
+ */
+media_status_t AImage_getPlanePixelStride(
+        const AImage* image, int planeIdx, /*out*/int32_t* pixelStride);
+
+/**
+ * Query the row stride of the input {@link AImage}.
+ *
+ * <p>This is the distance between the start of two consecutive rows of pixels in the image. Note
+ * that row stride is undefined for some formats such as {@link AIMAGE_FORMAT_RAW_PRIVATE}, and
+ * calling this method on images of those formats will cause {@link AMEDIA_ERROR_UNSUPPORTED}
+ * to be returned.
+ * For formats where row stride is well defined, the row stride is always greater than 0.</p>
+ *
+ * @param image the {@link AImage} of interest.
+ * @param planeIdx the index of the plane. Must be less than the number of planes of the input image.
+ * @param rowStride the row stride of the image will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or rowStride is NULL, or planeIdx
+ *                 is out of the range of [0, numOfPlanes - 1].</li>
+ *         <li>{@link AMEDIA_ERROR_UNSUPPORTED} if row stride is undefined for the format of input
+ *                 image.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} that generated this
+ *                 image has been deleted.</li></ul>
+ */
+media_status_t AImage_getPlaneRowStride(
+        const AImage* image, int planeIdx, /*out*/int32_t* rowStride);
+
+/**
+ * Get the data pointer of the input image for direct application access.
+ *
+ * <p>Note that once the {@link AImage} or the parent {@link AImageReader} is deleted, the data
+ * pointer from a previous AImage_getPlaneData call becomes invalid. Do NOT use it after the
+ * {@link AImage} or the parent {@link AImageReader} is deleted.</p>
+ *
+ * @param image the {@link AImage} of interest.
+ * @param planeIdx the index of the plane. Must be less than the number of planes of the input image.
+ * @param data the data pointer of the image will be filled here if the method call succeeds.
+ * @param dataLength the valid length of data will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image, data or dataLength is NULL, or
+ *                 planeIdx is out of the range of [0, numOfPlanes - 1].</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} that generated this
+ *                 image has been deleted.</li></ul>
+ */
+media_status_t AImage_getPlaneData(
+        const AImage* image, int planeIdx,
+        /*out*/uint8_t** data, /*out*/int* dataLength);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif //_NDK_IMAGE_H
+
+/** @} */
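A minimal usage sketch for the plane accessors declared above, not part of the change itself. It assumes the header is installed as <media/NdkImage.h> and that the image uses a CPU-readable format such as YUV, since RAW_PRIVATE would make the stride queries return AMEDIA_ERROR_UNSUPPORTED.

#include <cstdio>
#include <media/NdkImage.h>  // assumed install path of the header above

// Walk the planes of an already-acquired AImage and print their layout.
static void dumpPlanes(const AImage* image) {
    int32_t numPlanes = 0;
    if (AImage_getNumberOfPlanes(image, &numPlanes) != AMEDIA_OK) {
        return;
    }
    for (int32_t i = 0; i < numPlanes; i++) {
        int32_t pixelStride = 0;
        int32_t rowStride = 0;
        uint8_t* data = nullptr;
        int dataLength = 0;
        if (AImage_getPlanePixelStride(image, i, &pixelStride) != AMEDIA_OK ||
                AImage_getPlaneRowStride(image, i, &rowStride) != AMEDIA_OK ||
                AImage_getPlaneData(image, i, &data, &dataLength) != AMEDIA_OK) {
            continue;
        }
        // data/dataLength stay valid only until the AImage or its parent
        // AImageReader is deleted.
        printf("plane %d: pixelStride=%d rowStride=%d bytes=%d\n",
                i, pixelStride, rowStride, dataLength);
    }
}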
diff --git a/include/ndk/NdkImageReader.h b/include/ndk/NdkImageReader.h
new file mode 100644
index 0000000..7c7ec6a
--- /dev/null
+++ b/include/ndk/NdkImageReader.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Media Camera
+ * @{
+ */
+
+/**
+ * @file NdkImageReader.h
+ */
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+
+#ifndef _NDK_IMAGE_READER_H
+#define _NDK_IMAGE_READER_H
+
+#include <android/native_window.h>
+#include "NdkMediaError.h"
+#include "NdkImage.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * AImageReader is an opaque type that allows an application to receive {@link AImage} objects
+ * rendered into a {@link ANativeWindow}.
+ */
+typedef struct AImageReader AImageReader;
+
+/**
+ * Create a new reader for images of the desired size and format.
+ *
+ * <p>
+ * The maxImages parameter determines the maximum number of {@link AImage} objects that can be
+ * acquired from the {@link AImageReader} simultaneously. Requesting more buffers will use up
+ * more memory, so it is important to use only the minimum number necessary for the use case.
+ * </p>
+ * <p>
+ * The valid sizes and formats depend on the source of the image data.
+ * </p>
+ *
+ * @param width The default width in pixels of the Images that this reader will produce.
+ * @param height The default height in pixels of the Images that this reader will produce.
+ * @param format The format of the Image that this reader will produce. This must be one of the
+ *            AIMAGE_FORMAT_* enum values defined in {@link AIMAGE_FORMATS}. Note that not all
+ *            formats are supported; for example, {@link AIMAGE_FORMAT_PRIVATE} is not.
+ * @param maxImages The maximum number of images the user will want to access simultaneously. This
+ *            should be as small as possible to limit memory use. Once maxImages Images are obtained
+ *            by the user, one of them has to be released before a new {@link AImage} will become
+ *            available for access through {@link AImageReader_acquireLatestImage} or
+ *            {@link AImageReader_acquireNextImage}. Must be greater than 0.
+ * @param reader The created image reader will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader is NULL, or one or more of the
+ *                 width, height, format, or maxImages arguments is not supported.</li>
+ *         <li>{@link AMEDIA_ERROR_UNKNOWN} if the method fails for some other reason.</li></ul>
+ *
+ * @see AImage
+ */
+media_status_t AImageReader_new(
+        int32_t width, int32_t height, int32_t format, int32_t maxImages,
+        /*out*/AImageReader** reader);
+
+/**
+ * Delete an {@link AImageReader} and return all images generated by this reader to the system.
+ *
+ * <p>This method will return all {@link AImage} objects acquired by this reader (via
+ * {@link AImageReader_acquireNextImage} or {@link AImageReader_acquireLatestImage}) to the system,
+ * making any data pointers obtained from {@link AImage_getPlaneData} invalid. Do NOT access
+ * the reader object or any of those data pointers after this method returns.</p>
+ *
+ * @param reader The image reader to be deleted.
+ */
+void AImageReader_delete(AImageReader* reader);
+
+/**
+ * Get a {@link ANativeWindow} that can be used to produce {@link AImage} for this image reader.
+ *
+ * @param reader The image reader of interest.
+ * @param window The output {@link ANativeWindow} will be filled here if the method call succeeds.
+ *                The {@link ANativeWindow} is managed by this image reader. Do NOT call
+ *                {@link ANativeWindow_release} on it. Instead, use {@link AImageReader_delete}.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or window is NULL.</li></ul>
+ */
+media_status_t AImageReader_getWindow(AImageReader* reader, /*out*/ANativeWindow** window);
+
+/**
+ * Query the default width of the {@link AImage} generated by this reader, in pixels.
+ *
+ * <p>The width may be overridden by the producer sending buffers to this reader's
+ * {@link ANativeWindow}. If so, the actual width of the images can be found using
+ * {@link AImage_getWidth}.</p>
+ *
+ * @param reader The image reader of interest.
+ * @param width the default width of the reader will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or width is NULL.</li></ul>
+ */
+media_status_t AImageReader_getWidth(const AImageReader* reader, /*out*/int32_t* width);
+
+/**
+ * Query the default height of the {@link AImage} generated by this reader, in pixels.
+ *
+ * <p>The height may be overridden by the producer sending buffers to this reader's
+ * {@link ANativeWindow}. If so, the actual height of the images can be found using
+ * {@link AImage_getHeight}.</p>
+ *
+ * @param reader The image reader of interest.
+ * @param height the default height of the reader will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or height is NULL.</li></ul>
+ */
+media_status_t AImageReader_getHeight(const AImageReader* reader, /*out*/int32_t* height);
+
+/**
+ * Query the format of the {@link AImage} generated by this reader.
+ *
+ * @param reader The image reader of interest.
+ * @param format the format of the reader will be filled here if the method call succeeds. The
+ *                value will be one of the AIMAGE_FORMAT_* enum values defined in {@link NdkImage.h}.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or format is NULL.</li></ul>
+ */
+media_status_t AImageReader_getFormat(const AImageReader* reader, /*out*/int32_t* format);
+
+/**
+ * Query the maximum number of concurrently acquired {@link AImage}s of this reader.
+ *
+ * @param reader The image reader of interest.
+ * @param maxImages the maximum number of concurrently acquired images of the reader will be filled
+ *                here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or maxImages is NULL.</li></ul>
+ */
+media_status_t AImageReader_getMaxImages(const AImageReader* reader, /*out*/int32_t* maxImages);
+
+/**
+ * Acquire the next {@link AImage} from the image reader's queue.
+ *
+ * <p>Warning: Consider using {@link AImageReader_acquireLatestImage} instead, as it will
+ * automatically release older images, and allow slower-running processing routines to catch
+ * up to the newest frame. Usage of {@link AImageReader_acquireNextImage} is recommended for
+ * batch/background processing. Incorrectly using this method can cause images to appear
+ * with an ever-increasing delay, followed by a complete stall where no new images seem to appear.
+ * </p>
+ *
+ * <p>
+ * This method will fail if {@link AImageReader_getMaxImages maxImages} have been acquired with
+ * {@link AImageReader_acquireNextImage} or {@link AImageReader_acquireLatestImage}. In particular,
+ * a sequence of {@link AImageReader_acquireNextImage} or {@link AImageReader_acquireLatestImage}
+ * calls greater than {@link AImageReader_getMaxImages maxImages} without calling
+ * {@link AImage_delete} in-between will exhaust the underlying queue. At such a time,
+ * {@link AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED} will be returned until more images are released with
+ * {@link AImage_delete}.
+ * </p>
+ *
+ * @param reader The image reader of interest.
+ * @param image the acquired {@link AImage} will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or image is NULL.</li>
+ *         <li>{@link AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED} if the number of concurrently acquired
+ *                 images has reached the limit.</li>
+ *         <li>{@link AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE} if there are no buffers currently
+ *                 available in the reader queue.</li>
+ *         <li>{@link AMEDIA_ERROR_UNKNOWN} if the method fails for some other reason.</li></ul>
+ *
+ * @see AImageReader_acquireLatestImage
+ */
+media_status_t AImageReader_acquireNextImage(AImageReader* reader, /*out*/AImage** image);
+
+/**
+ * Acquire the latest {@link AImage} from the image reader's queue, dropping older images.
+ *
+ * <p>
+ * This operation will acquire all the images possible from the image reader, but
+ * {@link AImage_delete} all images that aren't the latest. This function is recommended over
+ * {@link AImageReader_acquireNextImage} for most use cases, as it is better suited for real-time
+ * processing.
+ * </p>
+ * <p>
+ * Note that {@link AImageReader_getMaxImages maxImages} should be at least 2 for
+ * {@link AImageReader_acquireLatestImage} to behave any differently than
+ * {@link AImageReader_acquireNextImage}; discarding all but the newest {@link AImage} requires
+ * temporarily acquiring two {@link AImage}s at once. More generally, calling
+ * {@link AImageReader_acquireLatestImage} with less than two images of margin, that is, when
+ * (maxImages - currentAcquiredImages) < 2, will not discard as expected.
+ * </p>
+ * <p>
+ * This method will fail if {@link AImageReader_getMaxImages maxImages} have been acquired with
+ * {@link AImageReader_acquireNextImage} or {@link AImageReader_acquireLatestImage}. In particular,
+ * a sequence of {@link AImageReader_acquireNextImage} or {@link AImageReader_acquireLatestImage}
+ * calls greater than {@link AImageReader_getMaxImages maxImages} without calling
+ * {@link AImage_delete} in-between will exhaust the underlying queue. At such a time,
+ * {@link AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED} will be returned until more images are released with
+ * {@link AImage_delete}.
+ * </p>
+ *
+ * @param reader The image reader of interest.
+ * @param image the acquired {@link AImage} will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader or image is NULL.</li>
+ *         <li>{@link AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED} if the number of concurrently acquired
+ *                 images has reached the limit.</li>
+ *         <li>{@link AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE} if there is no buffers currently
+ *                 available in the reader queue.</li>
+ *         <li>{@link AMEDIA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
+ *
+ * @see AImageReader_acquireNextImage
+ */
+media_status_t AImageReader_acquireLatestImage(AImageReader* reader, /*out*/AImage** image);
+
+/**
+ * The definition of the {@link AImageReader} new image available callback.
+ *
+ * @param context The optional application context provided by the user in
+ *                {@link AImageReader_setImageListener}.
+ * @param reader The {@link AImageReader} with a new image available.
+ */
+typedef void (*AImageReader_ImageCallback)(void* context, AImageReader* reader);
+
+typedef struct AImageReader_ImageListener {
+    /// optional application context.
+    void*                      context;
+
+    /**
+     * This callback is called when there is a new image available in the image reader's queue.
+     *
+     * <p>The callback happens on one dedicated thread per {@link AImageReader} instance. It is okay
+     * to use AImageReader_* and AImage_* methods within the callback. Note that it is possible that
+     * calling {@link AImageReader_acquireNextImage} or {@link AImageReader_acquireLatestImage}
+     * returns {@link AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE} within this callback. For example, when
+     * there are multiple images and callbacks queued, if the application calls
+     * {@link AImageReader_acquireLatestImage}, some images will be returned to the system before their
+     * corresponding callback is executed.</p>
+     */
+    AImageReader_ImageCallback onImageAvailable;
+} AImageReader_ImageListener;
+
+/**
+ * Set the onImageAvailable listener of this image reader.
+ *
+ * <p>Note that calling this method will replace previously registered listeners.</p>
+ *
+ * @param reader The image reader of interest.
+ * @param listener the {@link AImageReader_ImageListener} to be registered. Set this to NULL if
+ *                 the application no longer needs to listen for new images.
+ *
+ * @return <ul>
+ *         <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ *         <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader is NULL.</li></ul>
+ */
+media_status_t AImageReader_setImageListener(
+        AImageReader* reader, AImageReader_ImageListener* listener);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif //_NDK_IMAGE_READER_H
+
+/** @} */
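As a usage sketch for the reader API above (assumptions: the installed include path is <media/NdkImageReader.h>, and AIMAGE_FORMAT_YUV_420_888 is one of the AIMAGE_FORMAT_* values from NdkImage.h), a reader with a listener that drains frames as they arrive might look like this:

#include <cstdio>
#include <media/NdkImageReader.h>  // assumed install path of the header above

// Called on the reader's dedicated callback thread for each new image.
static void onImageAvailable(void* /*context*/, AImageReader* reader) {
    AImage* image = nullptr;
    media_status_t status = AImageReader_acquireLatestImage(reader, &image);
    if (status == AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE) {
        // An earlier acquireLatestImage call already consumed this buffer;
        // the documentation above calls this case out as expected.
        return;
    }
    if (status != AMEDIA_OK) {
        return;
    }
    int64_t timestampNs = 0;
    AImage_getTimestamp(image, &timestampNs);
    printf("frame at %lld ns\n", (long long) timestampNs);
    AImage_delete(image);  // return the buffer so new images can arrive
}

// maxImages is 2 so acquireLatestImage has the margin it needs to discard
// stale frames, per the documentation above.
static AImageReader* createPreviewReader() {
    AImageReader* reader = nullptr;
    if (AImageReader_new(640, 480, AIMAGE_FORMAT_YUV_420_888,
            /*maxImages*/ 2, &reader) != AMEDIA_OK) {
        return nullptr;
    }
    static AImageReader_ImageListener listener = { nullptr, onImageAvailable };
    AImageReader_setImageListener(reader, &listener);
    return reader;
}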
diff --git a/include/ndk/NdkMediaCodec.h b/include/ndk/NdkMediaCodec.h
index 4f6a1ef..fcb3a99 100644
--- a/include/ndk/NdkMediaCodec.h
+++ b/include/ndk/NdkMediaCodec.h
@@ -154,6 +154,18 @@
 media_status_t AMediaCodec_releaseOutputBuffer(AMediaCodec*, size_t idx, bool render);
 
 /**
+ * Dynamically sets the output surface of a codec.
+ *
+ *  This can only be used if the codec was configured with an output surface.  The
+ *  new output surface should have a usage type compatible with the original output surface.
+ *  For example, codecs may not support switching from a SurfaceTexture (GPU-readable) output
+ *  to an ImageReader (software-readable) output.
+ *
+ * For more details, see the Java documentation for MediaCodec.setOutputSurface.
+ */
+media_status_t AMediaCodec_setOutputSurface(AMediaCodec*, ANativeWindow* surface);
+
+/**
  * If you are done with a buffer, use this call to update its surface timestamp
  * and return it to the codec to render it on the output surface. If you
  * have not specified an output surface when configuring this video codec,
@@ -164,12 +176,18 @@
 media_status_t AMediaCodec_releaseOutputBufferAtTime(
         AMediaCodec *mData, size_t idx, int64_t timestampNs);
 
-
 typedef enum {
     AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
-    AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1
+    AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
+    AMEDIACODECRYPTOINFO_MODE_AES_WV = 2,
+    AMEDIACODECRYPTOINFO_MODE_AES_CBC = 3
 } cryptoinfo_mode_t;
 
+typedef struct {
+    int32_t encryptBlocks;
+    int32_t skipBlocks;
+} cryptoinfo_pattern_t;
+
 /**
  * Create an AMediaCodecCryptoInfo from scratch. Use this if you need to use custom
  * crypto info, rather than one obtained from AMediaExtractor.
@@ -199,6 +217,13 @@
 media_status_t AMediaCodecCryptoInfo_delete(AMediaCodecCryptoInfo*);
 
 /**
+ * Set the crypto pattern on an AMediaCodecCryptoInfo object.
+ */
+void AMediaCodecCryptoInfo_setPattern(
+        AMediaCodecCryptoInfo *info,
+        cryptoinfo_pattern_t *pattern);
+
+/**
  * The number of subsamples that make up the buffer's contents.
  */
 size_t AMediaCodecCryptoInfo_getNumSubSamples(AMediaCodecCryptoInfo*);
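A hedged sketch of the new pattern API; the 1:9 cbcs-style ratio and the helper name are illustrative, not mandated by this change. It assumes the header's installed include path:

#include <media/NdkMediaCodec.h>  // assumed install path of the header above

// Attach a "1 encrypted block, then 9 clear blocks" pattern to a crypto
// info object before queueing a secure input buffer.
static void applyCbcsPattern(AMediaCodecCryptoInfo* info) {
    cryptoinfo_pattern_t pattern;
    pattern.encryptBlocks = 1;  // blocks to decrypt in each repetition
    pattern.skipBlocks = 9;     // blocks to pass through untouched
    AMediaCodecCryptoInfo_setPattern(info, &pattern);
}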
diff --git a/include/ndk/NdkMediaError.h b/include/ndk/NdkMediaError.h
index 12613eb..60d401b 100644
--- a/include/ndk/NdkMediaError.h
+++ b/include/ndk/NdkMediaError.h
@@ -53,6 +53,10 @@
     AMEDIA_DRM_NEED_KEY                = AMEDIA_DRM_ERROR_BASE - 8,
     AMEDIA_DRM_LICENSE_EXPIRED         = AMEDIA_DRM_ERROR_BASE - 9,
 
+    AMEDIA_IMGREADER_ERROR_BASE          = -30000,
+    AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE = AMEDIA_IMGREADER_ERROR_BASE - 1,
+    AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED = AMEDIA_IMGREADER_ERROR_BASE - 2,
+
 } media_status_t;
 
 
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 1e5064f..ffdb9b5 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -26,6 +26,8 @@
 #include <utils/RefBase.h>
 #include <audio_utils/roundup.h>
 #include <media/AudioResamplerPublic.h>
+#include <media/AudioTimestamp.h>
+#include <media/Modulo.h>
 #include <media/SingleStateQueue.h>
 
 namespace android {
@@ -58,7 +60,8 @@
     volatile int32_t mRear;     // written by producer (output: client, input: server)
     volatile int32_t mFlush;    // incremented by client to indicate a request to flush;
                                 // server notices and discards all data between mFront and mRear
-    volatile uint32_t mUnderrunFrames;  // server increments for each unavailable but desired frame
+    volatile uint32_t mUnderrunFrames; // server increments for each unavailable but desired frame
+    volatile uint32_t mUnderrunCount;  // server increments for each underrun occurrence
 };
 
 // Represents a single state of an AudioTrack that was created in static mode (shared memory buffer
@@ -116,6 +119,8 @@
 
 typedef SingleStateQueue<AudioPlaybackRate> PlaybackRateQueue;
 
+typedef SingleStateQueue<ExtendedTimestamp> ExtendedTimestampQueue;
+
 // ----------------------------------------------------------------------------
 
 // Important: do not add any virtual methods, including ~
@@ -169,12 +174,17 @@
 
                 uint16_t    mPad2;           // unused
 
+                // server write-only, client read
+                ExtendedTimestampQueue::Shared mExtendedTimestampQueue;
+
+                // This is set by AudioTrack.setBufferSizeInFrames().
+                // A write will not fill the buffer above this limit.
+    volatile    uint32_t   mBufferSizeInFrames;  // effective size of the buffer
+
 public:
 
     volatile    int32_t     mFlags;         // combinations of CBLK_*
 
-                // Cache line boundary (32 bytes)
-
 public:
                 union {
                     AudioTrackSharedStreaming   mStreaming;
@@ -203,6 +213,8 @@
         size_t  mNonContig;             // number of additional non-contiguous frames available
     };
 
+    size_t frameCount() const { return mFrameCount; }
+
 protected:
     // These refer to shared memory, and are virtual addresses with respect to the current process.
     // They may have different virtual addresses within the other process.
@@ -260,6 +272,8 @@
     //  DEAD_OBJECT Server has died or invalidated, caller should destroy this proxy and re-create.
     //  -EINTR      Call has been interrupted.  Look around to see why, and then perhaps try again.
     //  NO_INIT     Shared memory is corrupt.
+    //  NOT_ENOUGH_DATA Server has disabled the track because of underrun: restart the track
+    //              if still in active state.
     // Assertion failure on entry, if buffer == NULL or buffer->mFrameCount == 0.
     status_t    obtainBuffer(Buffer* buffer, const struct timespec *requested = NULL,
             struct timespec *elapsed = NULL);
@@ -280,11 +294,11 @@
     // Call to force an obtainBuffer() to return quickly with -EINTR
     void        interrupt();
 
-    size_t      getPosition() {
+    Modulo<uint32_t> getPosition() {
         return mEpoch + mCblk->mServer;
     }
 
-    void        setEpoch(size_t epoch) {
+    void        setEpoch(const Modulo<uint32_t> &epoch) {
         mEpoch = epoch;
     }
 
@@ -300,14 +314,38 @@
     // in order for the client to be aligned at start of buffer
     virtual size_t  getMisalignment();
 
-    size_t      getEpoch() const {
+    Modulo<uint32_t> getEpoch() const {
         return mEpoch;
     }
 
-    size_t      getFramesFilled();
+    uint32_t      getBufferSizeInFrames() const { return mBufferSizeInFrames; }
+    // See documentation for AudioTrack::setBufferSizeInFrames()
+    uint32_t      setBufferSizeInFrames(uint32_t requestedSize);
+
+    status_t    getTimestamp(ExtendedTimestamp *timestamp) {
+        if (timestamp == nullptr) {
+            return BAD_VALUE;
+        }
+        (void) mTimestampObserver.poll(mTimestamp);
+        *timestamp = mTimestamp;
+        return OK;
+    }
+
+    void        clearTimestamp() {
+        mTimestamp.clear();
+    }
 
 private:
-    size_t      mEpoch;
+    // This is a copy of mCblk->mBufferSizeInFrames
+    uint32_t   mBufferSizeInFrames;  // effective size of the buffer
+
+    Modulo<uint32_t> mEpoch;
+
+    // The shared buffer contents referred to by the timestamp observer
+    // are initialized when the server proxy is created.  A local zero timestamp
+    // is initialized by the client constructor.
+    ExtendedTimestampQueue::Observer mTimestampObserver;
+    ExtendedTimestamp mTimestamp; // initialized by constructor
 };
 
 // ----------------------------------------------------------------------------
@@ -319,7 +357,9 @@
             size_t frameSize, bool clientInServer = false)
         : ClientProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/,
           clientInServer),
-          mPlaybackRateMutator(&cblk->mPlaybackRateQueue) { }
+          mPlaybackRateMutator(&cblk->mPlaybackRateQueue) {
+    }
+
     virtual ~AudioTrackClientProxy() { }
 
     // No barriers on the following operations, so the ordering of loads/stores
@@ -348,6 +388,9 @@
     virtual uint32_t    getUnderrunFrames() const {
         return mCblk->u.mStreaming.mUnderrunFrames;
     }
+    virtual uint32_t    getUnderrunCount() const {
+        return mCblk->u.mStreaming.mUnderrunCount;
+    }
 
     bool        clearStreamEndDone();   // and return previous value
 
@@ -416,6 +459,16 @@
         : ClientProxy(cblk, buffers, frameCount, frameSize,
             false /*isOut*/, false /*clientInServer*/) { }
     ~AudioRecordClientProxy() { }
+
+    // Advances the client read pointer to the server write head pointer
+    // effectively flushing the client read buffer. The effect is
+    // instantaneous. Returns the number of frames flushed.
+    uint32_t    flush() {
+        int32_t rear = android_atomic_acquire_load(&mCblk->u.mStreaming.mRear);
+        int32_t front = mCblk->u.mStreaming.mFront;
+        android_atomic_release_store(rear, &mCblk->u.mStreaming.mFront);
+        return (Modulo<int32_t>(rear) - front).unsignedValue();
+    }
 };
 
 // ----------------------------------------------------------------------------
@@ -461,9 +514,28 @@
     //  buffer->mRaw is NULL.
     virtual void        releaseBuffer(Buffer* buffer);
 
+    // Return the total number of frames that AudioFlinger has obtained and released
+    virtual int64_t     framesReleased() const { return mReleased; }
+
+    // Expose timestamp to client proxy. Should only be called by a single thread.
+    virtual void        setTimestamp(const ExtendedTimestamp &timestamp) {
+        mTimestampMutator.push(timestamp);
+    }
+
+    // Total count of the number of flushed frames since creation (never reset).
+    virtual int64_t     framesFlushed() const { return mFlushed; }
+
+    // Get dynamic buffer size from the shared control block.
+    uint32_t            getBufferSizeInFrames() const {
+        return android_atomic_acquire_load((int32_t *)&mCblk->mBufferSizeInFrames);
+    }
+
 protected:
     size_t      mAvailToClient; // estimated frames available to client prior to releaseBuffer()
     int32_t     mFlush;         // our copy of cblk->u.mStreaming.mFlush, for streaming output only
+    int64_t     mReleased;      // our copy of cblk->mServer, at 64 bit resolution
+    int64_t     mFlushed;       // flushed frames to account for client-server discrepancy
+    ExtendedTimestampQueue::Mutator mTimestampMutator;
 };
 
 // Proxy used by AudioFlinger for servicing AudioTrack
@@ -472,7 +544,8 @@
     AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
             size_t frameSize, bool clientInServer = false, uint32_t sampleRate = 0)
         : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer),
-          mPlaybackRateObserver(&cblk->mPlaybackRateQueue) {
+          mPlaybackRateObserver(&cblk->mPlaybackRateQueue),
+          mUnderrunCount(0), mUnderrunning(false), mDrained(true) {
         mCblk->mSampleRate = sampleRate;
         mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
     }
@@ -506,15 +579,30 @@
     // and thus which resulted in an underrun.
     virtual uint32_t    getUnderrunFrames() const { return mCblk->u.mStreaming.mUnderrunFrames; }
 
-    // Return the total number of frames that AudioFlinger has obtained and released
-    virtual size_t      framesReleased() const { return mCblk->mServer; }
-
     // Return the playback speed and pitch read atomically. Not multi-thread safe on server side.
     AudioPlaybackRate getPlaybackRate();
 
+    // Set the internal drain state of the track buffer from the timestamp received.
+    virtual void        setDrained(bool drained) {
+        mDrained.store(drained);
+    }
+
+    // Check the internal drain state of the track buffer.
+    // This is not a guarantee, but advisory for determining whether the track is
+    // fully played out.
+    virtual bool        isDrained() const {
+        return mDrained.load();
+    }
+
 private:
     AudioPlaybackRate             mPlaybackRate;  // last observed playback rate
     PlaybackRateQueue::Observer   mPlaybackRateObserver;
+
+    // The server keeps a copy here where it is safe from the client.
+    uint32_t                      mUnderrunCount; // echoed to mCblk
+    bool                          mUnderrunning;  // used to detect edge of underrun
+
+    std::atomic<bool>             mDrained; // is the track buffer drained
 };
 
 class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
@@ -558,6 +646,7 @@
     AudioRecordServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
             size_t frameSize, bool clientInServer)
         : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/, clientInServer) { }
+
 protected:
     virtual ~AudioRecordServerProxy() { }
 };
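The Modulo<uint32_t> positions introduced above rely on unsigned wraparound: differences between positions stay correct even when the raw counter overflows. A toy illustration of that invariant (not the real media/Modulo.h), mirroring what AudioRecordClientProxy::flush() computes with (Modulo<int32_t>(rear) - front).unsignedValue():

#include <cassert>
#include <cstdint>

// Frames between two wrapping 32-bit positions; unsigned subtraction is
// well-defined modulo 2^32, so the result survives counter overflow.
static uint32_t framesBetween(uint32_t front, uint32_t rear) {
    return rear - front;
}

int main() {
    uint32_t front = 0xFFFFFFF0u;  // has not wrapped yet
    uint32_t rear  = 0x00000010u;  // wrapped past 2^32
    assert(framesBetween(front, rear) == 0x20u);  // still 32 frames apart
    return 0;
}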
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
new file mode 100644
index 0000000..5ce1798
--- /dev/null
+++ b/media/audioserver/Android.mk
@@ -0,0 +1,59 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+	main_audioserver.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+	libaudioflinger \
+	libaudiopolicyservice \
+	libbinder \
+	libcutils \
+	liblog \
+	libmedia \
+	libmedialogservice \
+	libnbaio \
+	libradioservice \
+	libsoundtriggerservice \
+	libutils
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/services/audioflinger \
+	frameworks/av/services/audiopolicy \
+	frameworks/av/services/audiopolicy/common/managerdefinitions/include \
+	frameworks/av/services/audiopolicy/common/include \
+	frameworks/av/services/audiopolicy/engine/interface \
+	frameworks/av/services/audiopolicy/service \
+	frameworks/av/services/medialog \
+	frameworks/av/services/radio \
+	frameworks/av/services/soundtrigger \
+	$(call include-path-for, audio-utils) \
+	external/sonic \
+
+# If AUDIOSERVER_MULTILIB in device.mk is non-empty then it is used to control
+# the LOCAL_MULTILIB for all audioserver exclusive libraries.
+# This is relevant for 64 bit architectures where either or both
+# 32 and 64 bit libraries may be built.
+#
+# AUDIOSERVER_MULTILIB may be set as follows:
+#   32      to build 32 bit audioserver libraries and 32 bit audioserver.
+#   64      to build 64 bit audioserver libraries and 64 bit audioserver.
+#   both    to build both 32 bit and 64 bit libraries,
+#           and use primary target architecture (32 or 64) for audioserver.
+#   first   to build libraries and audioserver for the primary target architecture only.
+#   <empty> to build both 32 and 64 bit libraries and 32 bit audioserver.
+
+ifeq ($(strip $(AUDIOSERVER_MULTILIB)),)
+LOCAL_MULTILIB := 32
+else
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+endif
+
+LOCAL_MODULE := audioserver
+
+LOCAL_INIT_RC := audioserver.rc
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_EXECUTABLE)
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
new file mode 100644
index 0000000..2409157
--- /dev/null
+++ b/media/audioserver/audioserver.rc
@@ -0,0 +1,7 @@
+service audioserver /system/bin/audioserver
+    class main
+    user audioserver
+    # media gid needed for /dev/fm (radio) and for /data/misc/media (tee)
+    group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
new file mode 100644
index 0000000..4a7a988
--- /dev/null
+++ b/media/audioserver/main_audioserver.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "audioserver"
+//#define LOG_NDEBUG 0
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <cutils/properties.h>
+
+#include <binder/IPCThreadState.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+
+// from LOCAL_C_INCLUDES
+#include "AudioFlinger.h"
+#include "AudioPolicyService.h"
+#include "MediaLogService.h"
+#include "RadioService.h"
+#include "SoundTriggerHwService.h"
+
+using namespace android;
+
+int main(int argc __unused, char **argv)
+{
+    signal(SIGPIPE, SIG_IGN);
+
+    bool doLog = (bool) property_get_bool("ro.test_harness", 0);
+
+    pid_t childPid;
+    // FIXME The advantage of making the process containing the media.log service the parent
+    // process of the process that contains the other audio services is that it allows us to collect more
+    // detailed information such as signal numbers, stop and continue, resource usage, etc.
+    // But it is also more complex.  Consider replacing this by independent processes, and using
+    // binder on death notification instead.
+    if (doLog && (childPid = fork()) != 0) {
+        // media.log service
+        //prctl(PR_SET_NAME, (unsigned long) "media.log", 0, 0, 0);
+        // unfortunately ps ignores PR_SET_NAME for the main thread, so use this ugly hack
+        strcpy(argv[0], "media.log");
+        sp<ProcessState> proc(ProcessState::self());
+        MediaLogService::instantiate();
+        ProcessState::self()->startThreadPool();
+        IPCThreadState::self()->joinThreadPool();
+        for (;;) {
+            siginfo_t info;
+            int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED);
+            if (ret < 0) {
+                // waitid() returns -1 on error with errno set; retry if interrupted.
+                if (errno == EINTR) {
+                    continue;
+                }
+                break;
+            }
+            char buffer[32];
+            const char *code;
+            switch (info.si_code) {
+            case CLD_EXITED:
+                code = "CLD_EXITED";
+                break;
+            case CLD_KILLED:
+                code = "CLD_KILLED";
+                break;
+            case CLD_DUMPED:
+                code = "CLD_DUMPED";
+                break;
+            case CLD_STOPPED:
+                code = "CLD_STOPPED";
+                break;
+            case CLD_TRAPPED:
+                code = "CLD_TRAPPED";
+                break;
+            case CLD_CONTINUED:
+                code = "CLD_CONTINUED";
+                break;
+            default:
+                snprintf(buffer, sizeof(buffer), "unknown (%d)", info.si_code);
+                code = buffer;
+                break;
+            }
+            struct rusage usage;
+            getrusage(RUSAGE_CHILDREN, &usage);
+            ALOG(LOG_ERROR, "media.log", "pid %d status %d code %s user %ld.%03lds sys %ld.%03lds",
+                    info.si_pid, info.si_status, code,
+                    usage.ru_utime.tv_sec, usage.ru_utime.tv_usec / 1000,
+                    usage.ru_stime.tv_sec, usage.ru_stime.tv_usec / 1000);
+            sp<IServiceManager> sm = defaultServiceManager();
+            sp<IBinder> binder = sm->getService(String16("media.log"));
+            if (binder != 0) {
+                Vector<String16> args;
+                binder->dump(-1, args);
+            }
+            switch (info.si_code) {
+            case CLD_EXITED:
+            case CLD_KILLED:
+            case CLD_DUMPED: {
+                ALOG(LOG_INFO, "media.log", "exiting");
+                _exit(0);
+                // not reached
+                }
+            default:
+                break;
+            }
+        }
+    } else {
+        // all other services
+        if (doLog) {
+            prctl(PR_SET_PDEATHSIG, SIGKILL);   // if parent media.log dies before me, kill me also
+            setpgid(0, 0);                      // but if I die first, don't kill my parent
+        }
+        sp<ProcessState> proc(ProcessState::self());
+        sp<IServiceManager> sm = defaultServiceManager();
+        ALOGI("ServiceManager: %p", sm.get());
+        AudioFlinger::instantiate();
+        AudioPolicyService::instantiate();
+        RadioService::instantiate();
+        SoundTriggerHwService::instantiate();
+        ProcessState::self()->startThreadPool();
+        IPCThreadState::self()->joinThreadPool();
+    }
+}
diff --git a/media/img_utils/include/img_utils/TagDefinitions.h b/media/img_utils/include/img_utils/TagDefinitions.h
index e9a7480..1cc9866 100644
--- a/media/img_utils/include/img_utils/TagDefinitions.h
+++ b/media/img_utils/include/img_utils/TagDefinitions.h
@@ -193,6 +193,18 @@
 };
 
 /**
+ * Convenience values for tags with enumerated values
+ */
+
+enum {
+    TAG_ORIENTATION_NORMAL = 1,
+    TAG_ORIENTATION_ROTATE_180 = 3,
+    TAG_ORIENTATION_ROTATE_90 = 6,
+    TAG_ORIENTATION_ROTATE_270 = 8,
+    TAG_ORIENTATION_UNKNOWN = 9
+};
+
+/**
  * TIFF_EP_TAG_DEFINITIONS contains tags defined in the TIFF EP spec
  */
 const TagDefinition_t TIFF_EP_TAG_DEFINITIONS[] =  {
@@ -731,7 +743,7 @@
     { // BlackLevel
         "BlackLevel",
         0xC61Au,
-        LONG,
+        RATIONAL,
         RAW_IFD,
         0,
         UNDEFINED_ENDIAN
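A small sketch of how the TAG_ORIENTATION_* values added above might be used; the helper and the degrees-based input are illustrative, assuming TagDefinitions.h is on the include path:

#include <cstdint>
#include <img_utils/TagDefinitions.h>  // defines the TAG_ORIENTATION_* enum above

// Map a clockwise rotation in degrees to the corresponding TIFF/EXIF
// orientation value.
static uint16_t orientationForRotation(int degrees) {
    switch (degrees) {
        case 0:   return TAG_ORIENTATION_NORMAL;      // 1
        case 90:  return TAG_ORIENTATION_ROTATE_90;   // 6
        case 180: return TAG_ORIENTATION_ROTATE_180;  // 3
        case 270: return TAG_ORIENTATION_ROTATE_270;  // 8
        default:  return TAG_ORIENTATION_UNKNOWN;     // 9
    }
}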
diff --git a/media/img_utils/src/DngUtils.cpp b/media/img_utils/src/DngUtils.cpp
index 9473dce..9dc5f05 100644
--- a/media/img_utils/src/DngUtils.cpp
+++ b/media/img_utils/src/DngUtils.cpp
@@ -18,6 +18,7 @@
 
 #include <inttypes.h>
 
+#include <vector>
 #include <math.h>
 
 namespace android {
@@ -63,10 +64,17 @@
     double spacingV = 1.0 / lsmHeight;
     double spacingH = 1.0 / lsmWidth;
 
-    float redMap[lsmWidth * lsmHeight];
-    float greenEvenMap[lsmWidth * lsmHeight];
-    float greenOddMap[lsmWidth * lsmHeight];
-    float blueMap[lsmWidth * lsmHeight];
+    std::vector<float> redMapVector(lsmWidth * lsmHeight);
+    float *redMap = redMapVector.data();
+
+    std::vector<float> greenEvenMapVector(lsmWidth * lsmHeight);
+    float *greenEvenMap = greenEvenMapVector.data();
+
+    std::vector<float> greenOddMapVector(lsmWidth * lsmHeight);
+    float *greenOddMap = greenOddMapVector.data();
+
+    std::vector<float> blueMapVector(lsmWidth * lsmHeight);
+    float *blueMap = blueMapVector.data();
 
     size_t lsmMapSize = lsmWidth * lsmHeight * 4;
 
diff --git a/media/img_utils/src/NOTICE b/media/img_utils/src/NOTICE
new file mode 100644
index 0000000..90cea57
--- /dev/null
+++ b/media/img_utils/src/NOTICE
@@ -0,0 +1,2 @@
+This product includes DNG technology under license by Adobe Systems
+Incorporated.
diff --git a/media/libcpustats/Android.mk b/media/libcpustats/Android.mk
index ee283a6..57fe527 100644
--- a/media/libcpustats/Android.mk
+++ b/media/libcpustats/Android.mk
@@ -8,6 +8,6 @@
 
 LOCAL_MODULE := libcpustats
 
-LOCAL_CFLAGS := -std=gnu++11 -Werror
+LOCAL_CFLAGS := -std=gnu++11 -Werror -Wall
 
 include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 4a41037..9823c55 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -141,6 +141,37 @@
 }
 #endif
 
+static bool Downmix_validChannelMask(uint32_t mask)
+{
+    if (!mask) {
+        return false;
+    }
+    // check against unsupported channels
+    if (mask & kUnsupported) {
+        ALOGE("Unsupported channels (top or front left/right of center)");
+        return false;
+    }
+    // verify has FL/FR
+    if ((mask & AUDIO_CHANNEL_OUT_STEREO) != AUDIO_CHANNEL_OUT_STEREO) {
+        ALOGE("Front channels must be present");
+        return false;
+    }
+    // verify uses SIDE as a pair (ok if not using SIDE at all)
+    if ((mask & kSides) != 0) {
+        if ((mask & kSides) != kSides) {
+            ALOGE("Side channels must be used as a pair");
+            return false;
+        }
+    }
+    // verify uses BACK as a pair (ok if not using BACK at all)
+    if ((mask & kBacks) != 0) {
+        if ((mask & kBacks) != kBacks) {
+            ALOGE("Back channels must be used as a pair");
+            return false;
+        }
+    }
+    return true;
+}
 
 /*----------------------------------------------------------------------------
  * Effect API implementation
@@ -624,9 +655,10 @@
         pDownmixer->apply_volume_correction = false;
         pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1
     } else {
-        // when configuring the effect, do not allow a blank channel mask
-        if (pConfig->inputCfg.channels == 0) {
-            ALOGE("Downmix_Configure error: input channel mask can't be 0");
+        // when configuring the effect, do not allow a blank or unsupported channel mask
+        if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
+            ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
+                                                        pConfig->inputCfg.channels);
             return -EINVAL;
         }
         pDownmixer->input_channel_count =
@@ -969,34 +1001,13 @@
  */
 bool Downmix_foldGeneric(
         uint32_t mask, int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
-    // check against unsupported channels
-    if (mask & kUnsupported) {
-        ALOGE("Unsupported channels (top or front left/right of center)");
+
+    if (!Downmix_validChannelMask(mask)) {
         return false;
     }
-    // verify has FL/FR
-    if ((mask & AUDIO_CHANNEL_OUT_STEREO) != AUDIO_CHANNEL_OUT_STEREO) {
-        ALOGE("Front channels must be present");
-        return false;
-    }
-    // verify uses SIDE as a pair (ok if not using SIDE at all)
-    bool hasSides = false;
-    if ((mask & kSides) != 0) {
-        if ((mask & kSides) != kSides) {
-            ALOGE("Side channels must be used as a pair");
-            return false;
-        }
-        hasSides = true;
-    }
-    // verify uses BACK as a pair (ok if not using BACK at all)
-    bool hasBacks = false;
-    if ((mask & kBacks) != 0) {
-        if ((mask & kBacks) != kBacks) {
-            ALOGE("Back channels must be used as a pair");
-            return false;
-        }
-        hasBacks = true;
-    }
+
+    const bool hasSides = (mask & kSides) != 0;
+    const bool hasBacks = (mask & kBacks) != 0;
 
     const int numChan = audio_channel_count_from_out_mask(mask);
     const bool hasFC = ((mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) == AUDIO_CHANNEL_OUT_FRONT_CENTER);
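A sketch of calling the generic fold path above (assumptions: the declaration is made visible as below, and AUDIO_CHANNEL_OUT_5POINT1 from system/audio.h passes Downmix_validChannelMask because FL/FR are present and the back channels form a pair):

#include <cstddef>
#include <cstdint>
#include <system/audio.h>

// Declaration from EffectDownmix.c above (normally provided by its header).
extern "C" bool Downmix_foldGeneric(uint32_t mask, int16_t *pSrc, int16_t *pDst,
        size_t numFrames, bool accumulate);

// Fold an interleaved 5.1 buffer (6 samples per frame) down to stereo
// (2 samples per frame), overwriting rather than accumulating into pDst.
static bool downmix51ToStereo(int16_t *in51, int16_t *outStereo, size_t frames) {
    return Downmix_foldGeneric(AUDIO_CHANNEL_OUT_5POINT1, in51, outStereo,
            frames, false /*accumulate*/);
}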
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 14a1a74..a1892e4 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -722,17 +722,20 @@
 
     if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE){
         pOutTmp = pOut;
-    }else if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE){
+    } else if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE){
         if (pContext->pBundledContext->frameCount != frameCount) {
             if (pContext->pBundledContext->workBuffer != NULL) {
                 free(pContext->pBundledContext->workBuffer);
             }
             pContext->pBundledContext->workBuffer =
-                    (LVM_INT16 *)malloc(frameCount * sizeof(LVM_INT16) * 2);
+                    (LVM_INT16 *)calloc(frameCount, sizeof(LVM_INT16) * 2);
+            if (pContext->pBundledContext->workBuffer == NULL) {
+                return -ENOMEM;
+            }
             pContext->pBundledContext->frameCount = frameCount;
         }
         pOutTmp = pContext->pBundledContext->workBuffer;
-    }else{
+    } else {
         ALOGV("LVM_ERROR : LvmBundle_process invalid access mode");
         return -EINVAL;
     }
@@ -2872,7 +2875,7 @@
     EffectContext * pContext = (EffectContext *) self;
     LVM_ReturnStatus_en     LvmStatus = LVM_SUCCESS;                /* Function call status */
     int    status = 0;
-    int    lvmStatus = 0;
+    int    processStatus = 0;
     LVM_INT16   *in  = (LVM_INT16 *)inBuffer->raw;
     LVM_INT16   *out = (LVM_INT16 *)outBuffer->raw;
 
@@ -2960,19 +2963,22 @@
         //pContext->pBundledContext->NumberEffectsEnabled,
         //pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
 
-        if(status == -ENODATA){
+        if (status == -ENODATA) {
             ALOGV("\tEffect_process() processing last frame");
         }
         pContext->pBundledContext->NumberEffectsCalled = 0;
         /* Process all the available frames, block processing is
           handled internally by the LVM bundle */
-        lvmStatus = android::LvmBundle_process(    (LVM_INT16 *)inBuffer->raw,
+        processStatus = android::LvmBundle_process(    (LVM_INT16 *)inBuffer->raw,
                                                 (LVM_INT16 *)outBuffer->raw,
                                                 outBuffer->frameCount,
                                                 pContext);
-        if(lvmStatus != LVM_SUCCESS){
-            ALOGV("\tLVM_ERROR : LvmBundle_process returned error %d", lvmStatus);
-            return lvmStatus;
+        if (processStatus != 0) {
+            ALOGV("\tLVM_ERROR : LvmBundle_process returned error %d", processStatus);
+            if (status == 0) {
+                status = processStatus;
+            }
+            return status;
         }
     } else {
         //ALOGV("\tEffect_process Not Calling process with %d effects enabled, %d called: Effect %d",
diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk
index ea3c59d..4e4b094 100644
--- a/media/libeffects/preprocessing/Android.mk
+++ b/media/libeffects/preprocessing/Android.mk
@@ -11,9 +11,9 @@
     PreProcessing.cpp
 
 LOCAL_C_INCLUDES += \
-    external/webrtc/src \
-    external/webrtc/src/modules/interface \
-    external/webrtc/src/modules/audio_processing/interface \
+    external/webrtc \
+    external/webrtc/webrtc/modules/include \
+    external/webrtc/webrtc/modules/audio_processing/include \
     $(call include-path-for, audio-effects)
 
 LOCAL_C_INCLUDES += $(call include-path-for, speex)
@@ -25,6 +25,10 @@
     liblog
 
 LOCAL_SHARED_LIBRARIES += libdl
+
+LOCAL_CFLAGS += \
+    -DWEBRTC_POSIX
+
 LOCAL_CFLAGS += -fvisibility=hidden
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 6dd4439..f48bac1 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -89,6 +89,7 @@
     preproc_session_t *session;     // session the effect is on
     const preproc_ops_t *ops;       // effect ops table
     preproc_fx_handle_t engine;     // handle on webRTC engine
+    uint32_t type;                  // subtype of effect
 #ifdef DUAL_MIC_TEST
     bool aux_channels_on;           // support auxiliary channels
     size_t cur_channel_config;      // current auxiliary channel configuration
@@ -559,6 +560,21 @@
     ALOGV("NsInit");
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     ns->set_level(kNsDefaultLevel);
+    webrtc::Config config;
+    std::vector<webrtc::Point> geometry;
+    // TODO(aluebs): Make the geometry settable.
+    geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f));
+    geometry.push_back(webrtc::Point(-0.01f, 0.f, 0.f));
+    geometry.push_back(webrtc::Point(0.01f, 0.f, 0.f));
+    geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f));
+    // The geometry needs to be set with Beamforming enabled.
+    config.Set<webrtc::Beamforming>(
+            new webrtc::Beamforming(true, geometry));
+    effect->session->apm->SetExtraOptions(config);
+    config.Set<webrtc::Beamforming>(
+            new webrtc::Beamforming(false, geometry));
+    effect->session->apm->SetExtraOptions(config);
+    effect->type = NS_TYPE_SINGLE_CHANNEL;
     return 0;
 }
 
@@ -584,11 +600,35 @@
     return status;
 }
 
-int NsSetParameter (preproc_effect_t *effect __unused,
-                    void *pParam __unused,
-                    void *pValue __unused)
+int NsSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
 {
     int status = 0;
+    webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
+    uint32_t param = *(uint32_t *)pParam;
+    uint32_t value = *(uint32_t *)pValue;
+    switch(param) {
+        case NS_PARAM_LEVEL:
+            ns->set_level((webrtc::NoiseSuppression::Level)value);
+            ALOGV("NsSetParameter() level %d", value);
+            break;
+        case NS_PARAM_TYPE:
+        {
+            webrtc::Config config;
+            std::vector<webrtc::Point> geometry;
+            bool is_beamforming_enabled =
+                    value == NS_TYPE_MULTI_CHANNEL && ns->is_enabled();
+            config.Set<webrtc::Beamforming>(
+                    new webrtc::Beamforming(is_beamforming_enabled, geometry));
+            effect->session->apm->SetExtraOptions(config);
+            effect->type = value;
+            ALOGV("NsSetParameter() type %d", value);
+            break;
+        }
+        default:
+            ALOGW("NsSetParameter() unknown param %08x value %08x", param, value);
+            status = -EINVAL;
+    }
+
     return status;
 }
 
@@ -597,6 +637,12 @@
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     ALOGV("NsEnable ns %p", ns);
     ns->Enable(true);
+    if (effect->type == NS_TYPE_MULTI_CHANNEL) {
+        webrtc::Config config;
+        std::vector<webrtc::Point> geometry;
+        config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
+        effect->session->apm->SetExtraOptions(config);
+    }
 }
 
 void NsDisable(preproc_effect_t *effect)
@@ -604,6 +650,10 @@
     ALOGV("NsDisable");
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     ns->Enable(false);
+    webrtc::Config config;
+    std::vector<webrtc::Point> geometry;
+    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
+    effect->session->apm->SetExtraOptions(config);
 }
 
 static const preproc_ops_t sNsOps = {
@@ -777,14 +827,17 @@
     ALOGV("Session_CreateEffect procId %d, createdMsk %08x", procId, session->createdMsk);
 
     if (session->createdMsk == 0) {
-        session->apm = webrtc::AudioProcessing::Create(session->io);
+        session->apm = webrtc::AudioProcessing::Create();
         if (session->apm == NULL) {
             ALOGW("Session_CreateEffect could not get apm engine");
             goto error;
         }
-        session->apm->set_sample_rate_hz(kPreprocDefaultSr);
-        session->apm->set_num_channels(kPreProcDefaultCnl, kPreProcDefaultCnl);
-        session->apm->set_num_reverse_channels(kPreProcDefaultCnl);
+        const webrtc::ProcessingConfig processing_config = {
+            {{kPreprocDefaultSr, kPreProcDefaultCnl},
+             {kPreprocDefaultSr, kPreProcDefaultCnl},
+             {kPreprocDefaultSr, kPreProcDefaultCnl},
+             {kPreprocDefaultSr, kPreProcDefaultCnl}}};
+        session->apm->Initialize(processing_config);
         session->procFrame = new webrtc::AudioFrame();
         if (session->procFrame == NULL) {
             ALOGW("Session_CreateEffect could not allocate audio frame");
@@ -801,11 +854,11 @@
         session->samplingRate = kPreprocDefaultSr;
         session->inChannelCount = kPreProcDefaultCnl;
         session->outChannelCount = kPreProcDefaultCnl;
-        session->procFrame->_frequencyInHz = kPreprocDefaultSr;
-        session->procFrame->_audioChannel = kPreProcDefaultCnl;
+        session->procFrame->sample_rate_hz_ = kPreprocDefaultSr;
+        session->procFrame->num_channels_ = kPreProcDefaultCnl;
         session->revChannelCount = kPreProcDefaultCnl;
-        session->revFrame->_frequencyInHz = kPreprocDefaultSr;
-        session->revFrame->_audioChannel = kPreProcDefaultCnl;
+        session->revFrame->sample_rate_hz_ = kPreprocDefaultSr;
+        session->revFrame->num_channels_ = kPreProcDefaultCnl;
         session->enabledMsk = 0;
         session->processedMsk = 0;
         session->revEnabledMsk = 0;
@@ -834,7 +887,7 @@
         session->revFrame = NULL;
         delete session->procFrame;
         session->procFrame = NULL;
-        webrtc::AudioProcessing::Destroy(session->apm);
+        delete session->apm;
         session->apm = NULL;
     }
     return status;
@@ -846,7 +899,7 @@
     ALOGW_IF(Effect_Release(fx) != 0, " Effect_Release() failed for proc ID %d", fx->procId);
     session->createdMsk &= ~(1<<fx->procId);
     if (session->createdMsk == 0) {
-        webrtc::AudioProcessing::Destroy(session->apm);
+        delete session->apm;
         session->apm = NULL;
         delete session->procFrame;
         session->procFrame = NULL;
@@ -881,8 +934,8 @@
 int Session_SetConfig(preproc_session_t *session, effect_config_t *config)
 {
     uint32_t sr;
-    uint32_t inCnl = audio_channel_count_from_out_mask(config->inputCfg.channels);
-    uint32_t outCnl = audio_channel_count_from_out_mask(config->outputCfg.channels);
+    uint32_t inCnl = audio_channel_count_from_in_mask(config->inputCfg.channels);
+    uint32_t outCnl = audio_channel_count_from_in_mask(config->outputCfg.channels);
 
     if (config->inputCfg.samplingRate != config->outputCfg.samplingRate ||
         config->inputCfg.format != config->outputCfg.format ||
@@ -894,17 +947,6 @@
          config->inputCfg.samplingRate, config->inputCfg.channels);
     int status;
 
-    // if at least one process is enabled, do not accept configuration changes
-    if (session->enabledMsk) {
-        if (session->samplingRate != config->inputCfg.samplingRate ||
-                session->inChannelCount != inCnl ||
-                session->outChannelCount != outCnl) {
-            return -ENOSYS;
-        } else {
-            return 0;
-        }
-    }
-
     // AEC implementation is limited to 16kHz
     if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
         session->apmSamplingRate = 32000;
@@ -914,15 +956,13 @@
     } else if (config->inputCfg.samplingRate >= 8000) {
         session->apmSamplingRate = 8000;
     }
-    status = session->apm->set_sample_rate_hz(session->apmSamplingRate);
-    if (status < 0) {
-        return -EINVAL;
-    }
-    status = session->apm->set_num_channels(inCnl, outCnl);
-    if (status < 0) {
-        return -EINVAL;
-    }
-    status = session->apm->set_num_reverse_channels(inCnl);
+
+    const webrtc::ProcessingConfig processing_config = {
+      {{static_cast<int>(session->apmSamplingRate), inCnl},
+       {static_cast<int>(session->apmSamplingRate), outCnl},
+       {static_cast<int>(session->apmSamplingRate), inCnl},
+       {static_cast<int>(session->apmSamplingRate), inCnl}}};
+    status = session->apm->Initialize(processing_config);
     if (status < 0) {
         return -EINVAL;
     }
@@ -937,12 +977,12 @@
     }
     session->inChannelCount = inCnl;
     session->outChannelCount = outCnl;
-    session->procFrame->_audioChannel = inCnl;
-    session->procFrame->_frequencyInHz = session->apmSamplingRate;
+    session->procFrame->num_channels_ = inCnl;
+    session->procFrame->sample_rate_hz_ = session->apmSamplingRate;
 
     session->revChannelCount = inCnl;
-    session->revFrame->_audioChannel = inCnl;
-    session->revFrame->_frequencyInHz = session->apmSamplingRate;
+    session->revFrame->num_channels_ = inCnl;
+    session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
 
     // force process buffer reallocation
     session->inBufSize = 0;
@@ -1038,13 +1078,18 @@
         return -EINVAL;
     }
     uint32_t inCnl = audio_channel_count_from_out_mask(config->inputCfg.channels);
-    int status = session->apm->set_num_reverse_channels(inCnl);
+    const webrtc::ProcessingConfig processing_config = {
+       {{static_cast<int>(session->apmSamplingRate), session->inChannelCount},
+        {static_cast<int>(session->apmSamplingRate), session->outChannelCount},
+        {static_cast<int>(session->apmSamplingRate), inCnl},
+        {static_cast<int>(session->apmSamplingRate), inCnl}}};
+    int status = session->apm->Initialize(processing_config);
     if (status < 0) {
         return -EINVAL;
     }
     session->revChannelCount = inCnl;
-    session->revFrame->_audioChannel = inCnl;
-    session->revFrame->_frequencyInHz = session->apmSamplingRate;
+    session->revFrame->num_channels_ = inCnl;
+    session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
     // force process buffer reallocation
     session->revBufSize = 0;
     session->framesRev = 0;
@@ -1214,9 +1259,17 @@
                 fr = inBuffer->frameCount;
             }
             if (session->inBufSize < session->framesIn + fr) {
+                int16_t *buf;
                 session->inBufSize = session->framesIn + fr;
-                session->inBuf = (int16_t *)realloc(session->inBuf,
+                buf = (int16_t *)realloc(session->inBuf,
                                  session->inBufSize * session->inChannelCount * sizeof(int16_t));
+                if (buf == NULL) {
+                    session->framesIn = 0;
+                    free(session->inBuf);
+                    session->inBuf = NULL;
+                    return -ENOMEM;
+                }
+                session->inBuf = buf;
             }
             memcpy(session->inBuf + session->framesIn * session->inChannelCount,
                    inBuffer->s16,
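The NULL check added here fixes the classic realloc leak: assigning realloc's result directly to the only pointer loses the original block when allocation fails. A standalone sketch of the same guard, with illustrative names:

```cpp
#include <stdlib.h>

// Grow an int16_t buffer without leaking the old block on failure. On
// allocation failure the buffer is freed and cleared, mirroring the error
// path in the hunk above, and the caller returns -ENOMEM.
static int growBuffer(int16_t** buffer, size_t newCount) {
    int16_t* tmp = (int16_t*)realloc(*buffer, newCount * sizeof(int16_t));
    if (tmp == NULL) {
        free(*buffer);   // the original block is still valid; release it
        *buffer = NULL;
        return -1;
    }
    *buffer = tmp;
    return 0;
}
```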
@@ -1242,13 +1295,13 @@
                                             0,
                                             session->inBuf,
                                             &frIn,
-                                            session->procFrame->_payloadData,
+                                            session->procFrame->data_,
                                             &frOut);
             } else {
                 speex_resampler_process_interleaved_int(session->inResampler,
                                                         session->inBuf,
                                                         &frIn,
-                                                        session->procFrame->_payloadData,
+                                                        session->procFrame->data_,
                                                         &frOut);
             }
             memcpy(session->inBuf,
@@ -1260,7 +1313,7 @@
             if (inBuffer->frameCount < fr) {
                 fr = inBuffer->frameCount;
             }
-            memcpy(session->procFrame->_payloadData + session->framesIn * session->inChannelCount,
+            memcpy(session->procFrame->data_ + session->framesIn * session->inChannelCount,
                    inBuffer->s16,
                    fr * session->inChannelCount * sizeof(int16_t));
 
@@ -1280,15 +1333,22 @@
             }
             session->framesIn = 0;
         }
-        session->procFrame->_payloadDataLengthInSamples =
-                session->apmFrameCount * session->inChannelCount;
+        session->procFrame->samples_per_channel_ = session->apmFrameCount;
 
         effect->session->apm->ProcessStream(session->procFrame);
 
         if (session->outBufSize < session->framesOut + session->frameCount) {
+            int16_t *buf;
             session->outBufSize = session->framesOut + session->frameCount;
-            session->outBuf = (int16_t *)realloc(session->outBuf,
-                              session->outBufSize * session->outChannelCount * sizeof(int16_t));
+            buf = (int16_t *)realloc(session->outBuf,
+                             session->outBufSize * session->outChannelCount * sizeof(int16_t));
+            if (buf == NULL) {
+                session->framesOut = 0;
+                free(session->outBuf);
+                session->outBuf = NULL;
+                return -ENOMEM;
+            }
+            session->outBuf = buf;
         }
 
         if (session->outResampler != NULL) {
@@ -1297,13 +1357,13 @@
             if (session->inChannelCount == 1) {
                 speex_resampler_process_int(session->outResampler,
                                     0,
-                                    session->procFrame->_payloadData,
+                                    session->procFrame->data_,
                                     &frIn,
                                     session->outBuf + session->framesOut * session->outChannelCount,
                                     &frOut);
             } else {
                 speex_resampler_process_interleaved_int(session->outResampler,
-                                    session->procFrame->_payloadData,
+                                    session->procFrame->data_,
                                     &frIn,
                                     session->outBuf + session->framesOut * session->outChannelCount,
                                     &frOut);
@@ -1311,7 +1371,7 @@
             session->framesOut += frOut;
         } else {
             memcpy(session->outBuf + session->framesOut * session->outChannelCount,
-                   session->procFrame->_payloadData,
+                   session->procFrame->data_,
                    session->frameCount * session->outChannelCount * sizeof(int16_t));
             session->framesOut += session->frameCount;
         }
@@ -1744,9 +1804,17 @@
                 fr = inBuffer->frameCount;
             }
             if (session->revBufSize < session->framesRev + fr) {
+                int16_t *buf;
                 session->revBufSize = session->framesRev + fr;
-                session->revBuf = (int16_t *)realloc(session->revBuf,
-                                  session->revBufSize * session->inChannelCount * sizeof(int16_t));
+                buf = (int16_t *)realloc(session->revBuf,
+                                 session->revBufSize * session->inChannelCount * sizeof(int16_t));
+                if (buf == NULL) {
+                    session->framesRev = 0;
+                    free(session->revBuf);
+                    session->revBuf = NULL;
+                    return -ENOMEM;
+                }
+                session->revBuf = buf;
             }
             memcpy(session->revBuf + session->framesRev * session->inChannelCount,
                    inBuffer->s16,
@@ -1764,13 +1832,13 @@
                                             0,
                                             session->revBuf,
                                             &frIn,
-                                            session->revFrame->_payloadData,
+                                            session->revFrame->data_,
                                             &frOut);
             } else {
                 speex_resampler_process_interleaved_int(session->revResampler,
                                                         session->revBuf,
                                                         &frIn,
-                                                        session->revFrame->_payloadData,
+                                                        session->revFrame->data_,
                                                         &frOut);
             }
             memcpy(session->revBuf,
@@ -1782,7 +1850,7 @@
             if (inBuffer->frameCount < fr) {
                 fr = inBuffer->frameCount;
             }
-            memcpy(session->revFrame->_payloadData + session->framesRev * session->inChannelCount,
+            memcpy(session->revFrame->data_ + session->framesRev * session->inChannelCount,
                    inBuffer->s16,
                    fr * session->inChannelCount * sizeof(int16_t));
             session->framesRev += fr;
@@ -1792,8 +1860,7 @@
             }
             session->framesRev = 0;
         }
-        session->revFrame->_payloadDataLengthInSamples =
-                session->apmFrameCount * session->inChannelCount;
+        session->revFrame->samples_per_channel_ = session->apmFrameCount;
         effect->session->apm->AnalyzeReverseStream(session->revFrame);
         return 0;
     } else {
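The preprocessing changes above also track WebRTC's AudioFrame member renames: _payloadData becomes data_, _frequencyInHz becomes sample_rate_hz_, _audioChannel becomes num_channels_, and _payloadDataLengthInSamples (total interleaved samples) is replaced by samples_per_channel_ (frames per channel). A hedged sketch of filling a frame under the new names; the header path is an assumption for this WebRTC vintage:

```cpp
#include <string.h>
#include <webrtc/modules/interface/module_common_types.h>

// Sketch: prepare an AudioFrame for ProcessStream()/AnalyzeReverseStream().
// samples_per_channel_ counts frames per channel, so no multiplication by
// the channel count as with the old _payloadDataLengthInSamples field.
void fillFrame(webrtc::AudioFrame* frame, const int16_t* pcm,
               size_t framesPerChannel, size_t channels, int sampleRateHz) {
    frame->num_channels_ = channels;
    frame->sample_rate_hz_ = sampleRateHz;
    frame->samples_per_channel_ = framesPerChannel;
    memcpy(frame->data_, pcm, framesPerChannel * channels * sizeof(int16_t));
}
```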
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 9f836f0..2bdfd43 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -7,7 +7,7 @@
 LOCAL_MODULE:= libmedia_helper
 LOCAL_MODULE_TAGS := optional
 
-LOCAL_C_FLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
 LOCAL_CLANG := true
 
 include $(BUILD_STATIC_LIBRARY)
@@ -30,14 +30,19 @@
     AudioSystem.cpp \
     mediaplayer.cpp \
     IMediaCodecList.cpp \
+    IMediaCodecService.cpp \
+    IMediaDrmService.cpp \
     IMediaHTTPConnection.cpp \
     IMediaHTTPService.cpp \
     IMediaLogService.cpp \
+    IMediaExtractor.cpp \
+    IMediaExtractorService.cpp \
     IMediaPlayerService.cpp \
     IMediaPlayerClient.cpp \
     IMediaRecorderClient.cpp \
     IMediaPlayer.cpp \
     IMediaRecorder.cpp \
+    IMediaSource.cpp \
     IRemoteDisplay.cpp \
     IRemoteDisplayClient.cpp \
     IResourceManagerClient.cpp \
@@ -77,6 +82,9 @@
 
 LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
 
+# for memory heap analysis
+LOCAL_STATIC_LIBRARIES := libc_malloc_debug_backtrace libc_logging
+
 LOCAL_MODULE:= libmedia
 
 LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
@@ -90,6 +98,7 @@
 
 LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index ff82544..590952f 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -47,7 +47,7 @@
                 int32_t priority,
                 effect_callback_t cbf,
                 void* user,
-                int sessionId,
+                audio_session_t sessionId,
                 audio_io_handle_t io
                 )
     : mStatus(NO_INIT), mOpPackageName(opPackageName)
@@ -61,7 +61,7 @@
                 int32_t priority,
                 effect_callback_t cbf,
                 void* user,
-                int sessionId,
+                audio_session_t sessionId,
                 audio_io_handle_t io
                 )
     : mStatus(NO_INIT), mOpPackageName(opPackageName)
@@ -93,7 +93,7 @@
                 int32_t priority,
                 effect_callback_t cbf,
                 void* user,
-                int sessionId,
+                audio_session_t sessionId,
                 audio_io_handle_t io)
 {
     sp<IEffect> iEffect;
@@ -433,7 +433,7 @@
 }
 
 
-status_t AudioEffect::queryDefaultPreProcessing(int audioSession,
+status_t AudioEffect::queryDefaultPreProcessing(audio_session_t audioSession,
                                           effect_descriptor_t *descriptors,
                                           uint32_t *count)
 {
diff --git a/media/libmedia/AudioPolicy.cpp b/media/libmedia/AudioPolicy.cpp
index 9d07011..d1f7525 100644
--- a/media/libmedia/AudioPolicy.cpp
+++ b/media/libmedia/AudioPolicy.cpp
@@ -22,37 +22,37 @@
 namespace android {
 
 //
-//  AttributeMatchCriterion implementation
+//  AudioMixMatchCriterion implementation
 //
-AttributeMatchCriterion::AttributeMatchCriterion(audio_usage_t usage,
+AudioMixMatchCriterion::AudioMixMatchCriterion(audio_usage_t usage,
                                                  audio_source_t source,
                                                  uint32_t rule)
 : mRule(rule)
 {
     if (mRule == RULE_MATCH_ATTRIBUTE_USAGE ||
             mRule == RULE_EXCLUDE_ATTRIBUTE_USAGE) {
-        mAttr.mUsage = usage;
+        mValue.mUsage = usage;
     } else {
-        mAttr.mSource = source;
+        mValue.mSource = source;
     }
 }
 
-status_t AttributeMatchCriterion::readFromParcel(Parcel *parcel)
+status_t AudioMixMatchCriterion::readFromParcel(Parcel *parcel)
 {
     mRule = parcel->readInt32();
     if (mRule == RULE_MATCH_ATTRIBUTE_USAGE ||
             mRule == RULE_EXCLUDE_ATTRIBUTE_USAGE) {
-        mAttr.mUsage = (audio_usage_t)parcel->readInt32();
+        mValue.mUsage = (audio_usage_t)parcel->readInt32();
     } else {
-        mAttr.mSource = (audio_source_t)parcel->readInt32();
+        mValue.mSource = (audio_source_t)parcel->readInt32();
     }
     return NO_ERROR;
 }
 
-status_t AttributeMatchCriterion::writeToParcel(Parcel *parcel) const
+status_t AudioMixMatchCriterion::writeToParcel(Parcel *parcel) const
 {
     parcel->writeInt32(mRule);
-    parcel->writeInt32(mAttr.mUsage);
+    parcel->writeInt32(mValue.mUsage);
     return NO_ERROR;
 }
 
@@ -67,14 +67,15 @@
     mFormat.channel_mask = (audio_channel_mask_t)parcel->readInt32();
     mFormat.format = (audio_format_t)parcel->readInt32();
     mRouteFlags = parcel->readInt32();
-    mRegistrationId = parcel->readString8();
+    mDeviceType = (audio_devices_t) parcel->readInt32();
+    mDeviceAddress = parcel->readString8();
     mCbFlags = (uint32_t)parcel->readInt32();
     size_t size = (size_t)parcel->readInt32();
     if (size > MAX_CRITERIA_PER_MIX) {
         size = MAX_CRITERIA_PER_MIX;
     }
     for (size_t i = 0; i < size; i++) {
-        AttributeMatchCriterion criterion;
+        AudioMixMatchCriterion criterion;
         if (criterion.readFromParcel(parcel) == NO_ERROR) {
             mCriteria.add(criterion);
         }
@@ -89,7 +90,8 @@
     parcel->writeInt32(mFormat.channel_mask);
     parcel->writeInt32(mFormat.format);
     parcel->writeInt32(mRouteFlags);
-    parcel->writeString8(mRegistrationId);
+    parcel->writeInt32(mDeviceType);
+    parcel->writeString8(mDeviceAddress);
     parcel->writeInt32(mCbFlags);
     size_t size = mCriteria.size();
     if (size > MAX_CRITERIA_PER_MIX) {
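The rename from AttributeMatchCriterion/mAttr to AudioMixMatchCriterion/mValue keeps the wire format unchanged: one int32 rule followed by one int32 value whose meaning (usage or source) is selected by the rule. A simplified sketch of that discipline, assuming the rule constants from media/AudioPolicy.h:

```cpp
#include <binder/Parcel.h>
#include <media/AudioPolicy.h>
#include <system/audio.h>

// Simplified stand-in for the criterion above: the union gives both fields
// the same storage, so writing always goes through mUsage while reading
// dispatches on the rule.
struct CriterionSketch {
    uint32_t mRule;
    union { audio_usage_t mUsage; audio_source_t mSource; } mValue;

    void write(android::Parcel* parcel) const {
        parcel->writeInt32(mRule);
        parcel->writeInt32(mValue.mUsage);  // same bits regardless of rule
    }
    void read(android::Parcel* parcel) {
        mRule = parcel->readInt32();
        if (mRule == RULE_MATCH_ATTRIBUTE_USAGE ||
                mRule == RULE_EXCLUDE_ATTRIBUTE_USAGE) {
            mValue.mUsage = (audio_usage_t)parcel->readInt32();
        } else {
            mValue.mSource = (audio_source_t)parcel->readInt32();
        }
    }
};
```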
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 011b31f..d9bb856 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -66,7 +66,7 @@
 // ---------------------------------------------------------------------------
 
 AudioRecord::AudioRecord(const String16 &opPackageName)
-    : mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
+    : mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
@@ -82,13 +82,14 @@
         callback_t cbf,
         void* user,
         uint32_t notificationFrames,
-        int sessionId,
+        audio_session_t sessionId,
         transfer_type transferType,
         audio_input_flags_t flags,
         int uid,
         pid_t pid,
         const audio_attributes_t* pAttributes)
-    : mStatus(NO_INIT),
+    : mActive(false),
+      mStatus(NO_INIT),
       mOpPackageName(opPackageName),
       mSessionId(AUDIO_SESSION_ALLOCATE),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -139,7 +140,7 @@
         void* user,
         uint32_t notificationFrames,
         bool threadCanCallJava,
-        int sessionId,
+        audio_session_t sessionId,
         transfer_type transferType,
         audio_input_flags_t flags,
         int uid,
@@ -191,10 +192,6 @@
               mAttributes.source, mAttributes.flags, mAttributes.tags);
     }
 
-    if (sampleRate == 0) {
-        ALOGE("Invalid sample rate %u", sampleRate);
-        return BAD_VALUE;
-    }
     mSampleRate = sampleRate;
 
     // these below should probably come from the audioFlinger too...
@@ -231,7 +228,7 @@
     // mNotificationFramesAct is initialized in openRecord_l
 
     if (sessionId == AUDIO_SESSION_ALLOCATE) {
-        mSessionId = AudioSystem::newAudioUniqueId();
+        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
     } else {
         mSessionId = sessionId;
     }
@@ -250,7 +247,7 @@
         mClientPid = pid;
     }
 
-    mFlags = flags;
+    mOrigFlags = mFlags = flags;
     mCbf = cbf;
 
     if (cbf != NULL) {
@@ -272,10 +269,9 @@
     }
 
     mStatus = NO_ERROR;
-    mActive = false;
     mUserData = user;
     // TODO: add audio hardware input latency here
-    mLatency = (1000*mFrameCount) / sampleRate;
+    mLatency = (1000 * mFrameCount) / mSampleRate;
     mMarkerPosition = 0;
     mMarkerReached = false;
     mNewPosition = 0;
@@ -284,13 +280,15 @@
     mSequence = 1;
     mObservedSequence = mSequence;
     mInOverrun = false;
+    mFramesRead = 0;
+    mFramesReadServerOffset = 0;
 
     return NO_ERROR;
 }
 
 // -------------------------------------------------------------------------
 
-status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
+status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
 {
     ALOGV("start, sync event %d trigger session %d", event, triggerSession);
 
@@ -299,6 +297,12 @@
         return NO_ERROR;
     }
 
+    // discard data in buffer
+    const uint32_t framesFlushed = mProxy->flush();
+    mFramesReadServerOffset -= mFramesRead + framesFlushed;
+    mFramesRead = 0;
+    mProxy->clearTimestamp();  // timestamp is invalid until next server push
+
     // reset current position as seen by client to 0
     mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
     // force refresh of remaining frames by processAudioBuffer() as last
@@ -308,6 +312,10 @@
     mNewPosition = mProxy->getPosition() + mUpdatePeriod;
     int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);
 
+    // we reactivate markers (mMarkerPosition != 0) as the position is reset to 0.
+    // This is legacy behavior.  This is not done in stop() to avoid a race condition
+    // where the last marker event is issued twice.
+    mMarkerReached = false;
     mActive = true;
 
     status_t status = NO_ERROR;
@@ -348,9 +356,10 @@
     mActive = false;
     mProxy->interrupt();
     mAudioRecord->stop();
-    // the record head position will reset to 0, so if a marker is set, we need
-    // to activate it again
-    mMarkerReached = false;
+
+    // Note: legacy handling - stop() does not clear the record marker or the
+    // periodic update position; both are updated on start().
+
     sp<AudioRecordThread> t = mAudioRecordThread;
     if (t != 0) {
         t->pause();
@@ -391,7 +400,7 @@
     }
 
     AutoMutex lock(mLock);
-    *marker = mMarkerPosition;
+    mMarkerPosition.getValue(marker);
 
     return NO_ERROR;
 }
@@ -433,7 +442,7 @@
     }
 
     AutoMutex lock(mLock);
-    *position = mProxy->getPosition();
+    mProxy->getPosition().getValue(position);
 
     return NO_ERROR;
 }
@@ -444,6 +453,27 @@
     return AudioSystem::getInputFramesLost(getInputPrivate());
 }
 
+status_t AudioRecord::getTimestamp(ExtendedTimestamp *timestamp)
+{
+    if (timestamp == nullptr) {
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    status_t status = mProxy->getTimestamp(timestamp);
+    if (status == OK) {
+        timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesRead;
+        timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
+        // server side frame offset in case AudioRecord has been restored.
+        for (int i = ExtendedTimestamp::LOCATION_SERVER;
+                i < ExtendedTimestamp::LOCATION_MAX; ++i) {
+            if (timestamp->mTimeNs[i] >= 0) {
+                timestamp->mPosition[i] += mFramesReadServerOffset;
+            }
+        }
+    }
+    return status;
+}
+
 // ---- Explicit Routing ---------------------------------------------------
 status_t AudioRecord::setInputDevice(audio_port_handle_t deviceId) {
     AutoMutex lock(mLock);
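The new getTimestamp() above merges the client's cumulative read count into the server timestamp and applies mFramesReadServerOffset so positions stay monotonic across a track restore. A hedged usage sketch; LOG_TAG and the backlog interpretation are illustrative:

```cpp
#define LOG_TAG "RecordTsExample"
#include <media/AudioRecord.h>
#include <utils/Log.h>

// Sketch: read an ExtendedTimestamp and report how many frames sit between
// the server capture point and what the client has consumed.
void logRecordBacklog(android::AudioRecord& record) {
    android::ExtendedTimestamp ts;
    if (record.getTimestamp(&ts) != android::OK) {
        return;  // invalid until the server pushes a timestamp (see start())
    }
    const int64_t clientFrames =
            ts.mPosition[android::ExtendedTimestamp::LOCATION_CLIENT];
    const int64_t serverFrames =
            ts.mPosition[android::ExtendedTimestamp::LOCATION_SERVER];
    ALOGV("record backlog: %lld frames",
          (long long)(serverFrames - clientFrames));
}
```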
@@ -475,7 +505,7 @@
 // -------------------------------------------------------------------------
 
 // must be called with mLock held
-status_t AudioRecord::openRecord_l(size_t epoch, const String16& opPackageName)
+status_t AudioRecord::openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
 {
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     if (audioFlinger == 0) {
@@ -483,28 +513,86 @@
         return NO_INIT;
     }
 
-    // Fast tracks must be at the primary _output_ [sic] sampling rate,
-    // because there is currently no concept of a primary input sampling rate
-    uint32_t afSampleRate = AudioSystem::getPrimaryOutputSamplingRate();
-    if (afSampleRate == 0) {
-        ALOGW("getPrimaryOutputSamplingRate failed");
+    if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+    }
+    audio_io_handle_t input;
+
+    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
+    // After fast request is denied, we will request again if IAudioRecord is re-created.
+
+    status_t status;
+
+    // Not a conventional loop, but a retry loop for at most two iterations total.
+    // First try with the FAST flag if requested, then retry without it if that is denied.
+    // Exits loop normally via a return at the bottom, or with error via a break.
+    // The sp<> references will be dropped when re-entering scope.
+    // The lack of indentation is deliberate, to reduce code churn and ease merges.
+    for (;;) {
+
+    status = AudioSystem::getInputForAttr(&mAttributes, &input,
+                                        mSessionId,
+                                        // FIXME compare to AudioTrack
+                                        mClientPid,
+                                        mClientUid,
+                                        mSampleRate, mFormat, mChannelMask,
+                                        mFlags, mSelectedDeviceId);
+
+    if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE) {
+        ALOGE("Could not get audio input for session %d, record source %d, sample rate %u, "
+              "format %#x, channel mask %#x, flags %#x",
+              mSessionId, mAttributes.source, mSampleRate, mFormat, mChannelMask, mFlags);
+        return BAD_VALUE;
+    }
+
+    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
+    // we must release it ourselves if anything goes wrong.
+
+#if 0
+    size_t afFrameCount;
+    status = AudioSystem::getFrameCount(input, &afFrameCount);
+    if (status != NO_ERROR) {
+        ALOGE("getFrameCount(input=%d) status %d", input, status);
+        break;
+    }
+#endif
+
+    uint32_t afSampleRate;
+    status = AudioSystem::getSamplingRate(input, &afSampleRate);
+    if (status != NO_ERROR) {
+        ALOGE("getSamplingRate(input=%d) status %d", input, status);
+        break;
+    }
+    if (mSampleRate == 0) {
+        mSampleRate = afSampleRate;
     }
 
     // Client can only express a preference for FAST.  Server will perform additional tests.
-    if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !((
+    if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+        bool useCaseAllowed =
             // either of these use cases:
             // use case 1: callback transfer mode
             (mTransfer == TRANSFER_CALLBACK) ||
             // use case 2: obtain/release mode
-            (mTransfer == TRANSFER_OBTAIN)) &&
-            // matching sample rate
-            (mSampleRate == afSampleRate))) {
-        ALOGW("AUDIO_INPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, primary %u Hz",
+            (mTransfer == TRANSFER_OBTAIN);
+        // sample rates must also match
+        bool fastAllowed = useCaseAllowed && (mSampleRate == afSampleRate);
+        if (!fastAllowed) {
+            ALOGW("AUDIO_INPUT_FLAG_FAST denied by client; transfer %d, "
+                "track %u Hz, input %u Hz",
                 mTransfer, mSampleRate, afSampleRate);
-        // once denied, do not request again if IAudioRecord is re-created
-        mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
+            mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
+                    AUDIO_INPUT_FLAG_RAW));
+            AudioSystem::releaseInput(input, mSessionId);
+            continue;   // retry
+        }
     }
 
+    // The notification frame count is the period between callbacks, as suggested by the client
+    // but moderated by the server.  For record, the calculations are done entirely on server side.
+    size_t notificationFrames = mNotificationFramesReq;
+    size_t frameCount = mReqFrameCount;
+
     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
 
     pid_t tid = -1;
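The retry loop above negotiates AUDIO_INPUT_FLAG_FAST in at most two passes: acquire an input with the flag, and on a client-side or server-side denial strip FAST (and RAW), release the input, and go around once more. A self-contained sketch of the control flow; the helper functions are hypothetical stand-ins for the AudioSystem/AudioFlinger calls:

```cpp
#include <system/audio.h>

// Hypothetical stand-ins for the binder calls in the hunk; this pair simply
// simulates a server that refuses the FAST path.
static bool tryAcquireInput(audio_input_flags_t) { return true; }
static bool fastAccepted() { return false; }
static void releaseInput() {}

// Two-pass negotiation: the second iteration can no longer carry FAST|RAW,
// so the loop terminates after at most two acquisitions.
int negotiateInputFlags(audio_input_flags_t flags) {
    for (;;) {
        if (!tryAcquireInput(flags)) {
            return -1;  // hard failure, no retry
        }
        if ((flags & AUDIO_INPUT_FLAG_FAST) && !fastAccepted()) {
            flags = (audio_input_flags_t)(flags &
                    ~(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW));
            releaseInput();
            continue;   // retry without FAST
        }
        return 0;       // configuration accepted
    }
}
```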
@@ -515,34 +603,9 @@
         }
     }
 
-    if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
-        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
-    }
-
-    audio_io_handle_t input;
-    status_t status = AudioSystem::getInputForAttr(&mAttributes, &input,
-                                        (audio_session_t)mSessionId,
-                                        IPCThreadState::self()->getCallingUid(),
-                                        mSampleRate, mFormat, mChannelMask,
-                                        mFlags, mSelectedDeviceId);
-
-    if (status != NO_ERROR) {
-        ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
-              "channel mask %#x, session %d, flags %#x",
-              mAttributes.source, mSampleRate, mFormat, mChannelMask, mSessionId, mFlags);
-        return BAD_VALUE;
-    }
-    {
-    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
-    // we must release it ourselves if anything goes wrong.
-
-    size_t frameCount = mReqFrameCount;
     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                 // but we will still need the original value also
-    int originalSessionId = mSessionId;
-
-    // The notification frame count is the period between callbacks, as suggested by the server.
-    size_t notificationFrames = mNotificationFramesReq;
+    audio_session_t originalSessionId = mSessionId;
 
     sp<IMemory> iMem;           // for cblk
     sp<IMemory> bufferMem;
@@ -553,6 +616,7 @@
                                                        opPackageName,
                                                        &temp,
                                                        &trackFlags,
+                                                       mClientPid,
                                                        tid,
                                                        mClientUid,
                                                        &mSessionId,
@@ -565,13 +629,26 @@
 
     if (status != NO_ERROR) {
         ALOGE("AudioFlinger could not create record track, status: %d", status);
-        goto release;
+        break;
     }
     ALOG_ASSERT(record != 0);
 
     // AudioFlinger now owns the reference to the I/O handle,
     // so we are no longer responsible for releasing it.
 
+    mAwaitBoost = false;
+    if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+        if (trackFlags & IAudioFlinger::TRACK_FAST) {
+            ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
+            mAwaitBoost = true;
+        } else {
+            ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
+            mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
+                    AUDIO_INPUT_FLAG_RAW));
+            continue;   // retry
+        }
+    }
+
     if (iMem == 0) {
         ALOGE("Could not get control block");
         return NO_INIT;
@@ -614,23 +691,13 @@
     }
     frameCount = temp;
 
-    mAwaitBoost = false;
-    if (mFlags & AUDIO_INPUT_FLAG_FAST) {
-        if (trackFlags & IAudioFlinger::TRACK_FAST) {
-            ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
-            mAwaitBoost = true;
-        } else {
-            ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
-            // once denied, do not request again if IAudioRecord is re-created
-            mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
-        }
+    // Make sure that application is notified with sufficient margin before overrun.
+    // The computation is done on server side.
+    if (mNotificationFramesReq > 0 && notificationFrames != mNotificationFramesReq) {
+        ALOGW("Server adjusted notificationFrames from %u to %zu for frameCount %zu",
+                mNotificationFramesReq, notificationFrames, frameCount);
     }
-
-    // Make sure that application is notified with sufficient margin before overrun
-    if (notificationFrames == 0 || notificationFrames > frameCount) {
-        ALOGW("Received notificationFrames %zu for frameCount %zu", notificationFrames, frameCount);
-    }
-    mNotificationFramesAct = notificationFrames;
+    mNotificationFramesAct = (uint32_t) notificationFrames;
 
     // We retain a copy of the I/O handle, but don't own the reference
     mInput = input;
@@ -656,10 +723,13 @@
     }
 
     return NO_ERROR;
+
+    // End of retry loop.
+    // The lack of indentation is deliberate, to reduce code churn and ease merges.
     }
 
-release:
-    AudioSystem::releaseInput(input, (audio_session_t)mSessionId);
+// Arrive here on error, via a break
+    AudioSystem::releaseInput(input, mSessionId);
     if (status == NO_ERROR) {
         status = NO_INIT;
     }
@@ -832,7 +902,10 @@
 
         releaseBuffer(&audioBuffer);
     }
-
+    if (read > 0) {
+        mFramesRead += read / mFrameSize;
+        // mFramesReadTime = systemTime(SYSTEM_TIME_MONOTONIC); // not provided at this time.
+    }
     return read;
 }
 
@@ -885,23 +958,23 @@
     }
 
     // Get current position of server
-    size_t position = mProxy->getPosition();
+    Modulo<uint32_t> position(mProxy->getPosition());
 
     // Manage marker callback
     bool markerReached = false;
-    size_t markerPosition = mMarkerPosition;
+    Modulo<uint32_t> markerPosition(mMarkerPosition);
     // FIXME fails for wraparound, need 64 bits
-    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
+    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
         mMarkerReached = markerReached = true;
     }
 
     // Determine the number of new position callback(s) that will be needed, while locked
     size_t newPosCount = 0;
-    size_t newPosition = mNewPosition;
+    Modulo<uint32_t> newPosition(mNewPosition);
     uint32_t updatePeriod = mUpdatePeriod;
     // FIXME fails for wraparound, need 64 bits
     if (updatePeriod > 0 && position >= newPosition) {
-        newPosCount = ((position - newPosition) / updatePeriod) + 1;
+        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
         mNewPosition += updatePeriod * newPosCount;
     }
 
@@ -928,7 +1001,7 @@
         mCbf(EVENT_MARKER, mUserData, &markerPosition);
     }
     while (newPosCount > 0) {
-        size_t temp = newPosition;
+        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
         mCbf(EVENT_NEW_POS, mUserData, &temp);
         newPosition += updatePeriod;
         newPosCount--;
@@ -946,10 +1019,10 @@
     // Compute the estimated time until the next timed event (position, markers)
     uint32_t minFrames = ~0;
     if (!markerReached && position < markerPosition) {
-        minFrames = markerPosition - position;
+        minFrames = (markerPosition - position).value();
     }
     if (updatePeriod > 0) {
-        uint32_t remaining = newPosition - position;
+        uint32_t remaining = (newPosition - position).value();
         if (remaining < minFrames) {
             minFrames = remaining;
         }
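Wrapping positions in Modulo<uint32_t> makes the marker and update-period comparisons wraparound-safe: differences are taken modulo 2^32 and compared as signed distances, so the FIXME about needing 64 bits no longer bites for distances under 2^31 frames. A minimal stand-in for the idea (not the framework's actual Modulo template):

```cpp
#include <stdint.h>

// Simplified stand-in for Modulo<uint32_t>: ordering is decided by the sign
// of the modular difference, which is correct for distances below 2^31.
struct Mod32 {
    uint32_t v;
    uint32_t value() const { return v; }
    Mod32 operator-(Mod32 o) const { return Mod32{v - o.v}; }
    bool operator>=(Mod32 o) const { return (int32_t)(v - o.v) >= 0; }
};

// Example: a position that wrapped past UINT32_MAX still compares correctly.
//   Mod32 pos{3};                  // 2^32 + 3, stored mod 2^32
//   Mod32 marker{0xFFFFFFF0u};
//   pos >= marker                  // true
//   (pos - marker).value()         // 19
```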
@@ -983,6 +1056,7 @@
         requested = &timeout;
     }
 
+    size_t readFrames = 0;
     while (mRemainingFrames > 0) {
 
         Buffer audioBuffer;
@@ -1044,6 +1118,7 @@
         }
 
         releaseBuffer(&audioBuffer);
+        readFrames += releasedFrames;
 
         // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
         // if callback doesn't like to accept the full chunk
@@ -1067,6 +1142,11 @@
 #endif
 
     }
+    if (readFrames > 0) {
+        AutoMutex lock(mLock);
+        mFramesRead += readFrames;
+        // mFramesReadTime = systemTime(SYSTEM_TIME_MONOTONIC); // not provided at this time.
+    }
     mRemainingFrames = notificationFrames;
     mRetryOnPartialBuffer = true;
 
@@ -1079,18 +1159,21 @@
     ALOGW("dead IAudioRecord, creating a new one from %s()", from);
     ++mSequence;
 
+    mFlags = mOrigFlags;
+
     // if the new IAudioRecord is created, openRecord_l() will modify the
     // following member variables: mAudioRecord, mCblkMemory, mCblk, mBufferMemory.
     // It will also delete the strong references on previous IAudioRecord and IMemory
-    size_t position = mProxy->getPosition();
+    Modulo<uint32_t> position(mProxy->getPosition());
     mNewPosition = position + mUpdatePeriod;
     status_t result = openRecord_l(position, mOpPackageName);
     if (result == NO_ERROR) {
         if (mActive) {
             // callback thread or sync event hasn't changed
             // FIXME this fails if we have a new AudioFlinger instance
-            result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
+            result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, AUDIO_SESSION_NONE);
         }
+        mFramesReadServerOffset = mFramesRead; // server resets to zero so we need an offset.
     }
     if (result != NO_ERROR) {
         ALOGW("restoreRecord_l() failed status %d", result);
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 9d645f0..808b3ab 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -37,6 +37,7 @@
 sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
 audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
 dynamic_policy_callback AudioSystem::gDynPolicyCallback = NULL;
+record_config_callback  AudioSystem::gRecordConfigCallback = NULL;
 
 
 // establish binder interface to AudioFlinger service
@@ -103,6 +104,8 @@
     return DEAD_OBJECT;
 }
 
+// FIXME Declare in binder opcode order, similarly to IAudioFlinger.h and IAudioFlinger.cpp
+
 status_t AudioSystem::muteMicrophone(bool state)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -263,25 +266,23 @@
     return getSamplingRate(output, samplingRate);
 }
 
-status_t AudioSystem::getSamplingRate(audio_io_handle_t output,
+status_t AudioSystem::getSamplingRate(audio_io_handle_t ioHandle,
                                       uint32_t* samplingRate)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-    sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
-    if (outputDesc == 0) {
-        ALOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output);
-        *samplingRate = af->sampleRate(output);
+    sp<AudioIoDescriptor> desc = getIoDescriptor(ioHandle);
+    if (desc == 0) {
+        *samplingRate = af->sampleRate(ioHandle);
     } else {
-        ALOGV("getOutputSamplingRate() reading from output desc");
-        *samplingRate = outputDesc->mSamplingRate;
+        *samplingRate = desc->mSamplingRate;
     }
     if (*samplingRate == 0) {
-        ALOGE("AudioSystem::getSamplingRate failed for output %d", output);
+        ALOGE("AudioSystem::getSamplingRate failed for ioHandle %d", ioHandle);
         return BAD_VALUE;
     }
 
-    ALOGV("getSamplingRate() output %d, sampling rate %u", output, *samplingRate);
+    ALOGV("getSamplingRate() ioHandle %d, sampling rate %u", ioHandle, *samplingRate);
 
     return NO_ERROR;
 }
@@ -302,23 +303,23 @@
     return getFrameCount(output, frameCount);
 }
 
-status_t AudioSystem::getFrameCount(audio_io_handle_t output,
+status_t AudioSystem::getFrameCount(audio_io_handle_t ioHandle,
                                     size_t* frameCount)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-    sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
-    if (outputDesc == 0) {
-        *frameCount = af->frameCount(output);
+    sp<AudioIoDescriptor> desc = getIoDescriptor(ioHandle);
+    if (desc == 0) {
+        *frameCount = af->frameCount(ioHandle);
     } else {
-        *frameCount = outputDesc->mFrameCount;
+        *frameCount = desc->mFrameCount;
     }
     if (*frameCount == 0) {
-        ALOGE("AudioSystem::getFrameCount failed for output %d", output);
+        ALOGE("AudioSystem::getFrameCount failed for ioHandle %d", ioHandle);
         return BAD_VALUE;
     }
 
-    ALOGV("getFrameCount() output %d, frameCount %zu", output, *frameCount);
+    ALOGV("getFrameCount() ioHandle %d, frameCount %zu", ioHandle, *frameCount);
 
     return NO_ERROR;
 }
@@ -393,14 +394,14 @@
     return result;
 }
 
-audio_unique_id_t AudioSystem::newAudioUniqueId()
+audio_unique_id_t AudioSystem::newAudioUniqueId(audio_unique_id_use_t use)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return AUDIO_UNIQUE_ID_ALLOCATE;
-    return af->newAudioUniqueId();
+    return af->newAudioUniqueId(use);
 }
 
-void AudioSystem::acquireAudioSessionId(int audioSession, pid_t pid)
+void AudioSystem::acquireAudioSessionId(audio_session_t audioSession, pid_t pid)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af != 0) {
@@ -408,7 +409,7 @@
     }
 }
 
-void AudioSystem::releaseAudioSessionId(int audioSession, pid_t pid)
+void AudioSystem::releaseAudioSessionId(audio_session_t audioSession, pid_t pid)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af != 0) {
@@ -430,6 +431,27 @@
     return af->systemReady();
 }
 
+status_t AudioSystem::getFrameCountHAL(audio_io_handle_t ioHandle,
+                                       size_t* frameCount)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return PERMISSION_DENIED;
+    sp<AudioIoDescriptor> desc = getIoDescriptor(ioHandle);
+    if (desc == 0) {
+        *frameCount = af->frameCountHAL(ioHandle);
+    } else {
+        *frameCount = desc->mFrameCountHAL;
+    }
+    if (*frameCount == 0) {
+        ALOGE("AudioSystem::getFrameCountHAL failed for ioHandle %d", ioHandle);
+        return BAD_VALUE;
+    }
+
+    ALOGV("getFrameCountHAL() ioHandle %d, frameCount %zu", ioHandle, *frameCount);
+
+    return NO_ERROR;
+}
+
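getFrameCountHAL() follows the pattern the reworked getSamplingRate()/getFrameCount() now share: consult the cached AudioIoDescriptor for the handle first, and fall back to a binder call into AudioFlinger only on a cache miss, treating a zero result as BAD_VALUE. A generic sketch of that shape with hypothetical names:

```cpp
#include <functional>
#include <map>

// Hypothetical sketch of the cache-then-query pattern above: 'cache' plays
// the role of the io-descriptor table, 'query' the binder round-trip.
template <typename T>
int getCachedOrQuery(int ioHandle, const std::map<int, T>& cache,
                     const std::function<T(int)>& query, T* out) {
    auto it = cache.find(ioHandle);
    *out = (it != cache.end()) ? it->second : query(ioHandle);
    return (*out == T()) ? -1 : 0;  // zero result maps to BAD_VALUE
}
```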
 // ---------------------------------------------------------------------------
 
 
@@ -529,10 +551,10 @@
                 }
             }
             ALOGV("ioConfigChanged() new config for %s %d samplingRate %u, format %#x "
-                    "channel mask %#x frameCount %zu deviceId %d",
+                    "channel mask %#x frameCount %zu frameCountHAL %zu deviceId %d",
                     event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
                     ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
-                    ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->getDeviceId());
+                    ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->mFrameCountHAL, ioDesc->getDeviceId());
 
         } break;
         }
@@ -652,6 +674,12 @@
     gDynPolicyCallback = cb;
 }
 
+/*static*/ void AudioSystem::setRecordConfigCallback(record_config_callback cb)
+{
+    Mutex::Autolock _l(gLock);
+    gRecordConfigCallback = cb;
+}
+
 // client singleton for AudioPolicyService binder interface
 // protected by gLockAPS
 sp<IAudioPolicyService> AudioSystem::gAudioPolicyService;
@@ -808,6 +836,7 @@
 status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr,
                                 audio_io_handle_t *input,
                                 audio_session_t session,
+                                pid_t pid,
                                 uid_t uid,
                                 uint32_t samplingRate,
                                 audio_format_t format,
@@ -818,7 +847,8 @@
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return NO_INIT;
     return aps->getInputForAttr(
-            attr, input, session, uid, samplingRate, format, channelMask, flags, selectedDeviceId);
+            attr, input, session, pid, uid,
+            samplingRate, format, channelMask, flags, selectedDeviceId);
 }
 
 status_t AudioSystem::startInput(audio_io_handle_t input,
@@ -897,7 +927,7 @@
 status_t AudioSystem::registerEffect(const effect_descriptor_t *desc,
                                 audio_io_handle_t io,
                                 uint32_t strategy,
-                                int session,
+                                audio_session_t session,
                                 int id)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -1159,6 +1189,20 @@
     return aps->stopAudioSource(handle);
 }
 
+status_t AudioSystem::setMasterMono(bool mono)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->setMasterMono(mono);
+}
+
+status_t AudioSystem::getMasterMono(bool *mono)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->getMasterMono(mono);
+}
+
 // ---------------------------------------------------------------------------
 
 int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
@@ -1223,6 +1267,21 @@
     }
 }
 
+void AudioSystem::AudioPolicyServiceClient::onRecordingConfigurationUpdate(
+        int event, audio_session_t session, audio_source_t source,
+        const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+        audio_patch_handle_t patchHandle) {
+    record_config_callback cb = NULL;
+    {
+        Mutex::Autolock _l(AudioSystem::gLock);
+        cb = gRecordConfigCallback;
+    }
+
+    if (cb != NULL) {
+        cb(event, session, source, clientConfig, deviceConfig, patchHandle);
+    }
+}
+
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
 {
     {
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index ff5fe1d..e8da341 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -90,16 +90,24 @@
 // TODO: Move to a common library
 static size_t calculateMinFrameCount(
         uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
-        uint32_t sampleRate, float speed)
+        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
 {
     // Ensure that buffer depth covers at least audio hardware latency
     uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
     if (minBufCount < 2) {
         minBufCount = 2;
     }
+#if 0
+    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
+    // but keeping the code here to make it easier to add later.
+    if (minBufCount < notificationsPerBufferReq) {
+        minBufCount = notificationsPerBufferReq;
+    }
+#endif
     ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
-            "sampleRate %u  speed %f  minBufCount: %u",
-            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount);
+            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
+            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
+            /*, notificationsPerBufferReq*/);
     return minBufCount * sourceFramesNeededWithTimestretch(
             sampleRate, afFrameCount, afSampleRate, speed);
 }
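To make the math above concrete: with assumed values of afLatencyMs = 80, afFrameCount = 960 and afSampleRate = 48000, each HAL buffer covers 20 ms, so minBufCount = 4; a 44100 Hz client at normal speed then needs about 4 * 882 = 3528 frames. A small worked sketch (the timestretch helper is approximated):

```cpp
#include <stdint.h>
#include <stdio.h>

// Worked example of the minimum frame count computation above, with assumed
// HAL parameters: 80 ms latency, 960-frame buffers at 48 kHz, 44.1 kHz client.
int main() {
    const uint32_t afLatencyMs = 80, afFrameCount = 960, afSampleRate = 48000;
    const uint32_t sampleRate = 44100;
    const float speed = 1.0f;

    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) minBufCount = 2;                 // 80 / 20 ms -> 4

    // Approximation of sourceFramesNeededWithTimestretch(): source frames
    // needed to fill one HAL buffer at the requested speed.
    uint32_t srcFrames = (uint32_t)((uint64_t)afFrameCount * sampleRate
            / afSampleRate * speed);                      // 882
    printf("min frame count: %u\n", minBufCount * srcFrames);  // 3528
    return 0;
}
```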
@@ -144,7 +152,8 @@
 
     // When called from createTrack, speed is 1.0f (normal speed).
     // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
-    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f);
+    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
+            /*, 0 notificationsPerBufferReq*/);
 
     // The formula above should always produce a non-zero value under normal circumstances:
     // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
@@ -163,7 +172,7 @@
 
 AudioTrack::AudioTrack()
     : mStatus(NO_INIT),
-      mIsTimed(false),
+      mState(STATE_STOPPED),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
@@ -184,16 +193,17 @@
         audio_output_flags_t flags,
         callback_t cbf,
         void* user,
-        uint32_t notificationFrames,
-        int sessionId,
+        int32_t notificationFrames,
+        audio_session_t sessionId,
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
         int uid,
         pid_t pid,
         const audio_attributes_t* pAttributes,
-        bool doNotReconnect)
+        bool doNotReconnect,
+        float maxRequiredSpeed)
     : mStatus(NO_INIT),
-      mIsTimed(false),
+      mState(STATE_STOPPED),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
@@ -202,7 +212,7 @@
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
-            offloadInfo, uid, pid, pAttributes, doNotReconnect);
+            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
 }
 
 AudioTrack::AudioTrack(
@@ -214,16 +224,17 @@
         audio_output_flags_t flags,
         callback_t cbf,
         void* user,
-        uint32_t notificationFrames,
-        int sessionId,
+        int32_t notificationFrames,
+        audio_session_t sessionId,
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
         int uid,
         pid_t pid,
         const audio_attributes_t* pAttributes,
-        bool doNotReconnect)
+        bool doNotReconnect,
+        float maxRequiredSpeed)
     : mStatus(NO_INIT),
-      mIsTimed(false),
+      mState(STATE_STOPPED),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
@@ -232,7 +243,7 @@
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
-            uid, pid, pAttributes, doNotReconnect);
+            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
 }
 
 AudioTrack::~AudioTrack()
@@ -272,22 +283,25 @@
         audio_output_flags_t flags,
         callback_t cbf,
         void* user,
-        uint32_t notificationFrames,
+        int32_t notificationFrames,
         const sp<IMemory>& sharedBuffer,
         bool threadCanCallJava,
-        int sessionId,
+        audio_session_t sessionId,
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
         int uid,
         pid_t pid,
         const audio_attributes_t* pAttributes,
-        bool doNotReconnect)
+        bool doNotReconnect,
+        float maxRequiredSpeed)
 {
     ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
-          "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
+          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
           streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
           sessionId, transferType, uid, pid);
 
+    mThreadCanCallJava = threadCanCallJava;
+
     switch (transferType) {
     case TRANSFER_DEFAULT:
         if (sharedBuffer != 0) {
@@ -356,11 +370,16 @@
         if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
             flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
         }
+        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
+            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
+        }
     }
 
     // these below should probably come from the audioFlinger too...
     if (format == AUDIO_FORMAT_DEFAULT) {
         format = AUDIO_FORMAT_PCM_16_BIT;
+    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
+        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
     }
 
     // validate parameters
@@ -396,13 +415,13 @@
     }
 
     if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
-        if (audio_is_linear_pcm(format)) {
+        if (audio_has_proportional_frames(format)) {
             mFrameSize = channelCount * audio_bytes_per_sample(format);
         } else {
             mFrameSize = sizeof(uint8_t);
         }
     } else {
-        ALOG_ASSERT(audio_is_linear_pcm(format));
+        ALOG_ASSERT(audio_has_proportional_frames(format));
         mFrameSize = channelCount * audio_bytes_per_sample(format);
         // createTrack will return an error if PCM format is not supported by server,
         // so no need to check for specific PCM formats here
@@ -415,6 +434,8 @@
     mSampleRate = sampleRate;
     mOriginalSampleRate = sampleRate;
     mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
+    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
+    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
 
     // Make copy of input parameter offloadInfo so that in the future:
     //  (a) createTrack_l doesn't need it as an input parameter
@@ -431,10 +452,32 @@
     mSendLevel = 0.0f;
     // mFrameCount is initialized in createTrack_l
     mReqFrameCount = frameCount;
-    mNotificationFramesReq = notificationFrames;
+    if (notificationFrames >= 0) {
+        mNotificationFramesReq = notificationFrames;
+        mNotificationsPerBufferReq = 0;
+    } else {
+        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
+            ALOGE("notificationFrames=%d not permitted for non-fast track",
+                    notificationFrames);
+            return BAD_VALUE;
+        }
+        if (frameCount > 0) {
+            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
+                    notificationFrames, frameCount);
+            return BAD_VALUE;
+        }
+        mNotificationFramesReq = 0;
+        const uint32_t minNotificationsPerBuffer = 1;
+        const uint32_t maxNotificationsPerBuffer = 8;
+        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
+                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
+        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
+                "notificationFrames=%d clamped to the range -%u to -%u",
+                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
+    }
     mNotificationFramesAct = 0;
     if (sessionId == AUDIO_SESSION_ALLOCATE) {
-        mSessionId = AudioSystem::newAudioUniqueId();
+        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
     } else {
         mSessionId = sessionId;
     }
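Negative notificationFrames values are a new convention for fast tracks: they encode -(notifications per buffer) instead of a frame period, clamped to the range 1..8. A small sketch of just the decode step, mirroring the hunk (min/max as in the framework's helpers, here via <algorithm>):

```cpp
#include <stdint.h>
#include <algorithm>

// Decode the signed notificationFrames convention introduced above:
//   >= 0 : frames between EVENT_NEW_POS callbacks (0 = server default)
//   <  0 : -(notifications per buffer), FAST tracks only, clamped to [1, 8]
void decodeNotificationFrames(int32_t notificationFrames,
                              uint32_t* framesReq, uint32_t* perBufferReq) {
    if (notificationFrames >= 0) {
        *framesReq = (uint32_t)notificationFrames;
        *perBufferReq = 0;
    } else {
        *framesReq = 0;
        *perBufferReq = std::min(8u,
                std::max((uint32_t)-notificationFrames, 1u));
    }
}
```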
@@ -451,7 +494,7 @@
         mClientPid = pid;
     }
     mAuxEffectId = 0;
-    mFlags = flags;
+    mOrigFlags = mFlags = flags;
     mCbf = cbf;
 
     if (cbf != NULL) {
@@ -473,7 +516,6 @@
     }
 
     mStatus = NO_ERROR;
-    mState = STATE_STOPPED;
     mUserData = user;
     mLoopCount = 0;
     mLoopStart = 0;
@@ -493,6 +535,10 @@
     mPreviousTimestampValid = false;
     mTimestampStartupGlitchReported = false;
     mRetrogradeMotionReported = false;
+    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
+    mUnderrunCountOffset = 0;
+    mFramesWritten = 0;
+    mFramesWrittenServerOffset = 0;
 
     return NO_ERROR;
 }
@@ -522,15 +568,25 @@
         mPreviousTimestampValid = false;
         mTimestampStartupGlitchReported = false;
         mRetrogradeMotionReported = false;
+        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
 
-        // If previousState == STATE_STOPPED, we reactivate markers (mMarkerPosition != 0)
-        // as the position is reset to 0. This is legacy behavior. This is not done
-        // in stop() to avoid a race condition where the last marker event is issued twice.
-        // Note: the if is technically unnecessary because previousState == STATE_FLUSHED
-        // is only for streaming tracks, and mMarkerReached is already set to false.
-        if (previousState == STATE_STOPPED) {
-            mMarkerReached = false;
+        // read last server side position change via timestamp.
+        ExtendedTimestamp ets;
+        if (mProxy->getTimestamp(&ets) == OK &&
+                ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
+            // Server side has consumed something, but is it finished consuming?
+            // It is possible since flush and stop are asynchronous that the server
+            // is still active at this point.
+            ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
+                    (long long)(mFramesWrittenServerOffset
+                            + ets.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
+                    (long long)ets.mFlushed,
+                    (long long)mFramesWritten);
+            mFramesWrittenServerOffset = -ets.mPosition[ExtendedTimestamp::LOCATION_SERVER];
         }
+        mFramesWritten = 0;
+        mProxy->clearTimestamp(); // need new server push for valid timestamp
+        mMarkerReached = false;
 
         // For offloaded tracks, we don't know if the hardware counters are really zero here,
         // since the flush is asynchronous and stop may not fully drain.
@@ -545,19 +601,6 @@
     mNewPosition = mPosition + mUpdatePeriod;
     int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
 
-    sp<AudioTrackThread> t = mAudioTrackThread;
-    if (t != 0) {
-        if (previousState == STATE_STOPPING) {
-            mProxy->interrupt();
-        } else {
-            t->resume();
-        }
-    } else {
-        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
-        get_sched_policy(0, &mPreviousSchedulingGroup);
-        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
-    }
-
     status_t status = NO_ERROR;
     if (!(flags & CBLK_INVALID)) {
         status = mAudioTrack->start();
@@ -569,7 +612,21 @@
         status = restoreTrack_l("start");
     }
 
-    if (status != NO_ERROR) {
+    // resume or pause the callback thread as needed.
+    sp<AudioTrackThread> t = mAudioTrackThread;
+    if (status == NO_ERROR) {
+        if (t != 0) {
+            if (previousState == STATE_STOPPING) {
+                mProxy->interrupt();
+            } else {
+                t->resume();
+            }
+        } else {
+            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
+            get_sched_policy(0, &mPreviousSchedulingGroup);
+            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
+        }
+    } else {
         ALOGE("start() status %d", status);
         mState = previousState;
         if (t != 0) {
@@ -744,7 +801,7 @@
     if (rate == mSampleRate) {
         return NO_ERROR;
     }
-    if (mIsTimed || isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
+    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
         return INVALID_OPERATION;
     }
     if (mOutput == AUDIO_IO_HANDLE_NONE) {
@@ -771,10 +828,6 @@
 
 uint32_t AudioTrack::getSampleRate() const
 {
-    if (mIsTimed) {
-        return 0;
-    }
-
     AutoMutex lock(mLock);
 
     // sample rate can be updated during playback by the offloaded decoder so we need to
@@ -794,10 +847,6 @@
 
 uint32_t AudioTrack::getOriginalSampleRate() const
 {
-    if (mIsTimed) {
-        return 0;
-    }
-
     return mOriginalSampleRate;
 }
 
@@ -807,12 +856,15 @@
     if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
         return NO_ERROR;
     }
-    if (mIsTimed || isOffloadedOrDirect_l()) {
+    if (isOffloadedOrDirect_l()) {
         return INVALID_OPERATION;
     }
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         return INVALID_OPERATION;
     }
+
+    ALOGV("setPlaybackRate (input): mSampleRate:%u  mSpeed:%f  mPitch:%f",
+            mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
     // pitch is emulated by adjusting speed and sampleRate
     const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
     const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
@@ -821,23 +873,29 @@
     playbackRateTemp.mSpeed = effectiveSpeed;
     playbackRateTemp.mPitch = effectivePitch;
 
+    ALOGV("setPlaybackRate (effective): mSampleRate:%u  mSpeed:%f  mPitch:%f",
+            effectiveRate, effectiveSpeed, effectivePitch);
+
     if (!isAudioPlaybackRateValid(playbackRateTemp)) {
+        ALOGV("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
+                playbackRate.mSpeed, playbackRate.mPitch);
         return BAD_VALUE;
     }
     // Check if the buffer size is compatible.
     if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
-        ALOGV("setPlaybackRate(%f, %f) failed", playbackRate.mSpeed, playbackRate.mPitch);
+        ALOGV("setPlaybackRate(%f, %f) failed (buffer size)",
+                playbackRate.mSpeed, playbackRate.mPitch);
         return BAD_VALUE;
     }
 
     // Check resampler ratios are within bounds
-    if (effectiveRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
+    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
         ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
                 playbackRate.mSpeed, playbackRate.mPitch);
         return BAD_VALUE;
     }
 
-    if (effectiveRate * AUDIO_RESAMPLER_UP_RATIO_MAX < mSampleRate) {
+    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
         ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
                         playbackRate.mSpeed, playbackRate.mPitch);
         return BAD_VALUE;
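
Editor's note: the two checks above widen to uint64_t before multiplying because the 32-bit product of a rate and a resampler ratio can wrap, silently passing the bounds test. A minimal standalone sketch with deliberately large hypothetical values (not real audio rates):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t rate = 3000000;   // hypothetical, deliberately large
        uint32_t ratio = 2048;     // stand-in for a resampler ratio limit
        uint32_t wrapped = rate * ratio;                   // wraps modulo 2^32
        uint64_t exact = (uint64_t)rate * (uint64_t)ratio; // does not wrap here
        printf("32-bit product: %u\n64-bit product: %llu\n",
                wrapped, (unsigned long long)exact);
        return 0;
    }
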
@@ -855,9 +913,49 @@
     return mPlaybackRate;
 }
 
+ssize_t AudioTrack::getBufferSizeInFrames()
+{
+    AutoMutex lock(mLock);
+    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
+        return NO_INIT;
+    }
+    return (ssize_t) mProxy->getBufferSizeInFrames();
+}
+
+status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
+{
+    if (duration == nullptr) {
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
+        return NO_INIT;
+    }
+    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
+    if (bufferSizeInFrames < 0) {
+        return (status_t)bufferSizeInFrames;
+    }
+    *duration = (int64_t)((double)bufferSizeInFrames * 1000000
+            / ((double)mSampleRate * mPlaybackRate.mSpeed));
+    return NO_ERROR;
+}
+
+ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
+{
+    AutoMutex lock(mLock);
+    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
+        return NO_INIT;
+    }
+    // Reject compressed audio: resizing is only supported for linear PCM tracks.
+    if (!audio_is_linear_pcm(mFormat)) {
+        return INVALID_OPERATION;
+    }
+    return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
+}
+
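
Editor's note: getBufferDurationInUs() converts the frame count using the effective drain rate, i.e. sample rate times playback speed. A standalone sketch of the same conversion with hypothetical inputs:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the conversion in getBufferDurationInUs(); values are hypothetical.
    static int64_t bufferDurationUs(int64_t frames, uint32_t sampleRate, float speed) {
        return (int64_t)((double)frames * 1000000 / ((double)sampleRate * speed));
    }

    int main() {
        // 4800 frames at 48 kHz played at 2x speed drain in 50 ms.
        printf("%lld us\n", (long long)bufferDurationUs(4800, 48000, 2.0f));
        return 0;
    }
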
 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
-    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
+    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -920,7 +1018,7 @@
     }
 
     AutoMutex lock(mLock);
-    *marker = mMarkerPosition;
+    mMarkerPosition.getValue(marker);
 
     return NO_ERROR;
 }
@@ -960,7 +1058,7 @@
 
 status_t AudioTrack::setPosition(uint32_t position)
 {
-    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
+    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
     if (position > mFrameCount) {
@@ -992,7 +1090,11 @@
     }
 
     AutoMutex lock(mLock);
-    if (isOffloadedOrDirect_l()) {
+    // FIXME: offloaded and direct tracks call into the HAL for render positions
+    // for compressed/synced data; however, we use the proxy position for pure
+    // linear PCM data, as we do not know whether the HAL supports PCM position
+    // reporting across standby. There may be some latency differences between
+    // the HAL position and the proxy position.
+    if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
         uint32_t dspFrames = 0;
 
         if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
@@ -1018,14 +1120,14 @@
 
         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
         *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
-                0 : updateAndGetPosition_l();
+                0 : updateAndGetPosition_l().value();
     }
     return NO_ERROR;
 }
 
 status_t AudioTrack::getBufferPosition(uint32_t *position)
 {
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0) {
         return INVALID_OPERATION;
     }
     if (position == NULL) {
@@ -1039,7 +1141,7 @@
 
 status_t AudioTrack::reload()
 {
-    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
+    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -1129,9 +1231,12 @@
     audio_stream_type_t streamType = mStreamType;
     audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
 
+    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
+    // After fast request is denied, we will request again if IAudioTrack is re-created.
+
     status_t status;
     status = AudioSystem::getOutputForAttr(attr, &output,
-                                           (audio_session_t)mSessionId, &streamType, mClientUid,
+                                           mSessionId, &streamType, mClientUid,
                                            mSampleRate, mFormat, mChannelMask,
                                            mFlags, mSelectedDeviceId, mOffloadInfo);
 
@@ -1159,6 +1264,15 @@
         goto release;
     }
 
+    // TODO consider making this a member variable if there are other uses for it later
+    size_t afFrameCountHAL;
+    status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
+    if (status != NO_ERROR) {
+        ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
+        goto release;
+    }
+    ALOG_ASSERT(afFrameCountHAL > 0);
+
     status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
     if (status != NO_ERROR) {
         ALOGE("getSamplingRate(output=%d) status %d", output, status);
@@ -1168,35 +1282,33 @@
         mSampleRate = mAfSampleRate;
         mOriginalSampleRate = mAfSampleRate;
     }
-    // Client decides whether the track is TIMED (see below), but can only express a preference
-    // for FAST.  Server will perform additional tests.
-    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
+
+    // Client can only express a preference for FAST.  Server will perform additional tests.
+    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
+        bool useCaseAllowed =
             // either of these use cases:
             // use case 1: shared buffer
             (mSharedBuffer != 0) ||
             // use case 2: callback transfer mode
             (mTransfer == TRANSFER_CALLBACK) ||
             // use case 3: obtain/release mode
-            (mTransfer == TRANSFER_OBTAIN)) &&
-            // matching sample rate
-            (mSampleRate == mAfSampleRate))) {
-        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, output %u Hz",
+            (mTransfer == TRANSFER_OBTAIN) ||
+            // use case 4: synchronous write
+            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
+        // sample rates must also match
+        bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
+        if (!fastAllowed) {
+            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
+                "track %u Hz, output %u Hz",
                 mTransfer, mSampleRate, mAfSampleRate);
-        // once denied, do not request again if IAudioTrack is re-created
-        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
+            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
+        }
     }
 
-    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
-    //  n = 1   fast track with single buffering; nBuffering is ignored
-    //  n = 2   fast track with double buffering
-    //  n = 2   normal track, (including those with sample rate conversion)
-    //  n >= 3  very high latency or very small notification interval (unused).
-    const uint32_t nBuffering = 2;
-
     mNotificationFramesAct = mNotificationFramesReq;
 
     size_t frameCount = mReqFrameCount;
-    if (!audio_is_linear_pcm(mFormat)) {
+    if (!audio_has_proportional_frames(mFormat)) {
 
         if (mSharedBuffer != 0) {
             // Same comment as below about ignoring frameCount parameter for set()
@@ -1233,28 +1345,39 @@
         // there _is_ a frameCount parameter.  We silently ignore it.
         frameCount = mSharedBuffer->size() / mFrameSize;
     } else {
-        // For fast tracks the frame count calculations and checks are done by server
-
-        if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
-            // for normal tracks precompute the frame count based on speed.
-            const size_t minFrameCount = calculateMinFrameCount(
-                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
-                    mPlaybackRate.mSpeed);
-            if (frameCount < minFrameCount) {
-                frameCount = minFrameCount;
+        size_t minFrameCount = 0;
+        // For fast tracks the frame count calculations and checks are mostly done by server,
+        // but we try to respect the application's request for notifications per buffer.
+        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
+            if (mNotificationsPerBufferReq > 0) {
+                // Avoid possible arithmetic overflow during multiplication.
+                // mNotificationsPerBufferReq is clamped to a small integer earlier, so overflow is unlikely.
+                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
+                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
+                            mNotificationsPerBufferReq, afFrameCountHAL);
+                } else {
+                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
+                }
             }
+        } else {
+            // for normal tracks precompute the frame count based on speed.
+            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
+                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
+            minFrameCount = calculateMinFrameCount(
+                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
+                    speed /*, 0 mNotificationsPerBufferReq*/);
+        }
+        if (frameCount < minFrameCount) {
+            frameCount = minFrameCount;
         }
     }
 
     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
-    if (mIsTimed) {
-        trackFlags |= IAudioFlinger::TRACK_TIMED;
-    }
 
     pid_t tid = -1;
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         trackFlags |= IAudioFlinger::TRACK_FAST;
-        if (mAudioTrackThread != 0) {
+        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
             tid = mAudioTrackThread->getTid();
         }
     }
@@ -1269,7 +1392,7 @@
 
     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                 // but we will still need the original value also
-    int originalSessionId = mSessionId;
+    audio_session_t originalSessionId = mSessionId;
     sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                       mSampleRate,
                                                       mFormat,
@@ -1278,6 +1401,7 @@
                                                       &trackFlags,
                                                       mSharedBuffer,
                                                       output,
+                                                      mClientPid,
                                                       tid,
                                                       &mSessionId,
                                                       mClientUid,
@@ -1294,6 +1418,7 @@
     // AudioFlinger now owns the reference to the I/O handle,
     // so we are no longer responsible for releasing it.
 
+    // FIXME compare to AudioRecord
     sp<IMemory> iMem = track->getCblk();
     if (iMem == 0) {
         ALOGE("Could not get control block");
@@ -1328,41 +1453,38 @@
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         if (trackFlags & IAudioFlinger::TRACK_FAST) {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
-            mAwaitBoost = true;
+            if (!mThreadCanCallJava) {
+                mAwaitBoost = true;
+            }
         } else {
-            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
-            // once denied, do not request again if IAudioTrack is re-created
+            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
         }
     }
-    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
-        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
-            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
-        } else {
-            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
-            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
-            // FIXME This is a warning, not an error, so don't return error status
-            //return NO_INIT;
-        }
-    }
-    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
-        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
-            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
-        } else {
-            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
-            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
-            // FIXME This is a warning, not an error, so don't return error status
-            //return NO_INIT;
-        }
-    }
-    // Make sure that application is notified with sufficient margin before underrun
+
+    // Make sure that application is notified with sufficient margin before underrun.
+    // The client can divide the AudioTrack buffer into sub-buffers,
+    // and expresses its preference to the server as the notification frame count.
     if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
-        // Theoretically double-buffering is not required for fast tracks,
-        // due to tighter scheduling.  But in practice, to accommodate kernels with
-        // scheduling jitter, and apps with computation jitter, we use double-buffering
-        // for fast tracks just like normal streaming tracks.
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
-            mNotificationFramesAct = frameCount / nBuffering;
+        size_t maxNotificationFrames;
+        if (trackFlags & IAudioFlinger::TRACK_FAST) {
+            // notify every HAL buffer, regardless of the size of the track buffer
+            maxNotificationFrames = afFrameCountHAL;
+        } else {
+            // For normal tracks, use at least double-buffering if no sample rate conversion,
+            // or at least triple-buffering if there is sample rate conversion
+            const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
+            maxNotificationFrames = frameCount / nBuffering;
+        }
+        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
+            if (mNotificationFramesAct == 0) {
+                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
+                    maxNotificationFrames, frameCount);
+            } else {
+                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
+                    mNotificationFramesAct, maxNotificationFrames, frameCount);
+            }
+            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
         }
     }
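
Editor's note: the cap above is one HAL buffer for fast tracks, half the track buffer for normal tracks, and a third of it when the sink sample rate differs (resampling adds a buffer of latency). A small standalone sketch of that selection, with illustrative names and values:

    #include <cstddef>
    #include <cstdio>

    // Sketch of the notification cap chosen above; names are illustrative.
    static size_t maxNotificationFrames(bool fast, size_t afFrameCountHAL,
                                        size_t frameCount, bool resampling) {
        if (fast) {
            return afFrameCountHAL;        // notify once per HAL buffer
        }
        const int nBuffering = resampling ? 3 : 2;
        return frameCount / nBuffering;
    }

    int main() {
        printf("%zu\n", maxNotificationFrames(false, 256, 1920, true)); // 640
        return 0;
    }
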
 
@@ -1436,7 +1558,7 @@
     }
 
 release:
-    AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
+    AudioSystem::releaseOutput(output, streamType, mSessionId);
     if (status == NO_ERROR) {
         status = NO_INIT;
     }
@@ -1517,6 +1639,10 @@
             }
             oldSequence = newSequence;
 
+            if (status == NOT_ENOUGH_DATA) {
+                restartIfDisabled();
+            }
+
             // Keep the extra references
             proxy = mProxy;
             iMem = mCblkMemory;
@@ -1539,8 +1665,7 @@
         buffer.mFrameCount = audioBuffer->frameCount;
         // FIXME starts the requested timeout and elapsed over from scratch
         status = proxy->obtainBuffer(&buffer, requested, elapsed);
-
-    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
+    } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
 
     audioBuffer->frameCount = buffer.mFrameCount;
     audioBuffer->size = buffer.mFrameCount * mFrameSize;
@@ -1573,13 +1698,16 @@
     mProxy->releaseBuffer(&buffer);
 
     // restart track if it was disabled by audioflinger due to previous underrun
-    if (mState == STATE_ACTIVE) {
-        audio_track_cblk_t* cblk = mCblk;
-        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
-            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
-            // FIXME ignoring status
-            mAudioTrack->start();
-        }
+    restartIfDisabled();
+}
+
+void AudioTrack::restartIfDisabled()
+{
+    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+    if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
+        ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
+        // FIXME ignoring status
+        mAudioTrack->start();
     }
 }
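
Editor's note: restartIfDisabled() relies on android_atomic_and() returning the pre-update flags, so clearing CBLK_DISABLED and testing whether it was set happen in one atomic step. The same test-and-clear idiom with std::atomic, as a standalone sketch (the flag value is hypothetical):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    constexpr int32_t CBLK_DISABLED_SKETCH = 0x8; // hypothetical flag bit

    // fetch_and returns the previous value, so exactly one caller observes
    // the DISABLED -> enabled edge.
    static bool clearDisabled(std::atomic<int32_t>& flags) {
        int32_t prev = flags.fetch_and(~CBLK_DISABLED_SKETCH);
        return (prev & CBLK_DISABLED_SKETCH) != 0;
    }

    int main() {
        std::atomic<int32_t> flags{CBLK_DISABLED_SKETCH};
        printf("first clear saw disabled: %d\n", clearDisabled(flags));  // 1
        printf("second clear saw disabled: %d\n", clearDisabled(flags)); // 0
        return 0;
    }
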
 
@@ -1587,7 +1715,7 @@
 
 ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
 {
-    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
+    if (mTransfer != TRANSFER_SYNC) {
         return INVALID_OPERATION;
     }
 
@@ -1632,78 +1760,14 @@
         releaseBuffer(&audioBuffer);
     }
 
+    if (written > 0) {
+        mFramesWritten += written / mFrameSize;
+    }
     return written;
 }
 
 // -------------------------------------------------------------------------
 
-TimedAudioTrack::TimedAudioTrack() {
-    mIsTimed = true;
-}
-
-status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
-{
-    AutoMutex lock(mLock);
-    status_t result = UNKNOWN_ERROR;
-
-#if 1
-    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioTrack> audioTrack = mAudioTrack;
-    sp<IMemory> iMem = mCblkMemory;
-#endif
-
-    // If the track is not invalid already, try to allocate a buffer.  alloc
-    // fails indicating that the server is dead, flag the track as invalid so
-    // we can attempt to restore in just a bit.
-    audio_track_cblk_t* cblk = mCblk;
-    if (!(cblk->mFlags & CBLK_INVALID)) {
-        result = mAudioTrack->allocateTimedBuffer(size, buffer);
-        if (result == DEAD_OBJECT) {
-            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
-        }
-    }
-
-    // If the track is invalid at this point, attempt to restore it. and try the
-    // allocation one more time.
-    if (cblk->mFlags & CBLK_INVALID) {
-        result = restoreTrack_l("allocateTimedBuffer");
-
-        if (result == NO_ERROR) {
-            result = mAudioTrack->allocateTimedBuffer(size, buffer);
-        }
-    }
-
-    return result;
-}
-
-status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
-                                           int64_t pts)
-{
-    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
-    {
-        AutoMutex lock(mLock);
-        audio_track_cblk_t* cblk = mCblk;
-        // restart track if it was disabled by audioflinger due to previous underrun
-        if (buffer->size() != 0 && status == NO_ERROR &&
-                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
-            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
-            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
-            // FIXME ignoring status
-            mAudioTrack->start();
-        }
-    }
-    return status;
-}
-
-status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
-                                                TargetTimeline target)
-{
-    return mAudioTrack->setMediaTimeTransform(xform, target);
-}
-
-// -------------------------------------------------------------------------
-
 nsecs_t AudioTrack::processAudioBuffer()
 {
     // Currently the AudioTrack thread is not created if there are no callbacks.
@@ -1774,23 +1838,23 @@
     }
 
     // Get current position of server
-    size_t position = updateAndGetPosition_l();
+    Modulo<uint32_t> position(updateAndGetPosition_l());
 
     // Manage marker callback
     bool markerReached = false;
-    size_t markerPosition = mMarkerPosition;
-    // FIXME fails for wraparound, need 64 bits
-    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
+    Modulo<uint32_t> markerPosition(mMarkerPosition);
+    // uses 32 bit wraparound for comparison with position.
+    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
         mMarkerReached = markerReached = true;
     }
 
     // Determine number of new position callback(s) that will be needed, while locked
     size_t newPosCount = 0;
-    size_t newPosition = mNewPosition;
-    size_t updatePeriod = mUpdatePeriod;
+    Modulo<uint32_t> newPosition(mNewPosition);
+    uint32_t updatePeriod = mUpdatePeriod;
     // FIXME fails for wraparound, need 64 bits
     if (updatePeriod > 0 && position >= newPosition) {
-        newPosCount = ((position - newPosition) / updatePeriod) + 1;
+        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
         mNewPosition += updatePeriod * newPosCount;
     }
 
@@ -1891,7 +1955,7 @@
         mCbf(EVENT_MARKER, mUserData, &markerPosition);
     }
     while (newPosCount > 0) {
-        size_t temp = newPosition;
+        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
         mCbf(EVENT_NEW_POS, mUserData, &temp);
         newPosition += updatePeriod;
         newPosCount--;
@@ -1915,14 +1979,14 @@
     // FIXME only for non-compressed audio
     uint32_t minFrames = ~0;
     if (!markerReached && position < markerPosition) {
-        minFrames = markerPosition - position;
+        minFrames = (markerPosition - position).value();
     }
     if (loopPeriod > 0 && loopPeriod < minFrames) {
         // loopPeriod is already adjusted for actual position.
         minFrames = loopPeriod;
     }
     if (updatePeriod > 0) {
-        minFrames = min(minFrames, uint32_t(newPosition - position));
+        minFrames = min(minFrames, (newPosition - position).value());
     }
 
     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
@@ -1965,6 +2029,7 @@
         requested = &timeout;
     }
 
+    size_t writtenFrames = 0;
     while (mRemainingFrames > 0) {
 
         Buffer audioBuffer;
@@ -1987,7 +2052,7 @@
             return NS_NEVER;
         }
 
-        if (mRetryOnPartialBuffer && audio_is_linear_pcm(mFormat)) {
+        if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
             mRetryOnPartialBuffer = false;
             if (avail < mRemainingFrames) {
                 if (ns > 0) { // account for obtain time
@@ -2033,11 +2098,12 @@
             // buffer size and skip the loop entirely.
 
             nsecs_t myns;
-            if (audio_is_linear_pcm(mFormat)) {
+            if (audio_has_proportional_frames(mFormat)) {
                 // time to wait based on buffer occupancy
                 const nsecs_t datans = mRemainingFrames <= avail ? 0 :
                         framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
                 // audio flinger thread buffer size (TODO: adjust for fast tracks)
+                // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
                 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
                 // add a half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
                 myns = datans + (afns / 2);
@@ -2066,6 +2132,7 @@
         }
 
         releaseBuffer(&audioBuffer);
+        writtenFrames += releasedFrames;
 
         // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
         // if callback doesn't like to accept the full chunk
@@ -2089,6 +2156,10 @@
 #endif
 
     }
+    if (writtenFrames > 0) {
+        AutoMutex lock(mLock);
+        mFramesWritten += writtenFrames;
+    }
     mRemainingFrames = notificationFrames;
     mRetryOnPartialBuffer = true;
 
@@ -2112,6 +2183,9 @@
         return DEAD_OBJECT;
     }
 
+    // Save the current count so we can report the underrun count since creation.
+    mUnderrunCountOffset = getUnderrunCount_l();
+
     // save the old static buffer position
     size_t bufferPosition = 0;
     int loopCount = 0;
@@ -2119,6 +2193,8 @@
         mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
     }
 
+    mFlags = mOrigFlags;
+
     // If a new IAudioTrack is successfully created, createTrack_l() will modify the
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioTrack and IMemory.
@@ -2146,6 +2222,7 @@
         }
         if (mState == STATE_ACTIVE) {
             result = mAudioTrack->start();
+            mFramesWrittenServerOffset = mFramesWritten; // server resets to zero so we offset
         }
     }
     if (result != NO_ERROR) {
@@ -2157,12 +2234,11 @@
     return result;
 }
 
-uint32_t AudioTrack::updateAndGetPosition_l()
+Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
 {
     // This is the sole place to read server consumed frames
-    uint32_t newServer = mProxy->getPosition();
-    int32_t delta = newServer - mServer;
-    mServer = newServer;
+    Modulo<uint32_t> newServer(mProxy->getPosition());
+    const int32_t delta = (newServer - mServer).signedValue();
     // TODO There is controversy about whether there can be "negative jitter" in server position.
     //      This should be investigated further, and if possible, it should be addressed.
     //      A more definite failure mode is infrequent polling by client.
@@ -2171,11 +2247,14 @@
     //      That should ensure delta never goes negative for infrequent polling
     //      unless the server has more than 2^31 frames in its buffer,
     //      in which case the use of uint32_t for these counters has bigger issues.
-    if (delta < 0) {
-        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
-        delta = 0;
+    ALOGE_IF(delta < 0,
+            "detected illegal retrograde motion by the server: mServer advanced by %d",
+            delta);
+    mServer = newServer;
+    if (delta > 0) { // avoid retrograde
+        mPosition += delta;
     }
-    return mPosition += (uint32_t) delta;
+    return mPosition;
 }
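
Editor's note: Modulo<uint32_t> makes the server-position delta well defined across 32-bit wraparound: subtract in unsigned arithmetic, then reinterpret the result as signed. A stand-in standalone sketch (not the real Modulo template):

    #include <cstdint>
    #include <cstdio>

    // Wrap-safe delta: unsigned subtraction, then a signed reinterpretation.
    static int32_t signedDelta(uint32_t newer, uint32_t older) {
        return (int32_t)(newer - older);
    }

    int main() {
        uint32_t server = 0xFFFFFFF0u;  // just before wraparound
        uint32_t next   = 0x00000010u;  // after the counter wrapped
        printf("delta = %d\n", signedDelta(next, server)); // 32, not a huge negative
        return 0;
    }
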
 
 bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
@@ -2185,7 +2264,8 @@
         return true; // static tracks do not have issues with buffer sizing.
     }
     const size_t minFrameCount =
-            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed);
+            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
+                /*, 0 mNotificationsPerBufferReq*/);
     ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
             mFrameCount, minFrameCount);
     return mFrameCount >= minFrameCount;
@@ -2197,6 +2277,47 @@
     return mAudioTrack->setParameters(keyValuePairs);
 }
 
+status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
+{
+    if (timestamp == nullptr) {
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    return getTimestamp_l(timestamp);
+}
+
+status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
+{
+    if (mCblk->mFlags & CBLK_INVALID) {
+        const status_t status = restoreTrack_l("getTimestampExtended");
+        if (status != OK) {
+            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
+            // recommending that the track be recreated.
+            return DEAD_OBJECT;
+        }
+    }
+    // check for offloaded/direct here in case restoring somehow changed those flags.
+    if (isOffloadedOrDirect_l()) {
+        return INVALID_OPERATION; // not supported
+    }
+    status_t status = mProxy->getTimestamp(timestamp);
+    LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
+    bool found = false;
+    timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
+    timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
+    // server side frame offset in case AudioTrack has been restored.
+    for (int i = ExtendedTimestamp::LOCATION_SERVER;
+            i < ExtendedTimestamp::LOCATION_MAX; ++i) {
+        if (timestamp->mTimeNs[i] >= 0) {
+            // apply the server-side offset (the flushed frame count is ignored
+            // so we don't report a position jump when a flush occurs).
+            timestamp->mPosition[i] += mFramesWrittenServerOffset;
+            found = true;
+        }
+    }
+    return found ? OK : WOULD_BLOCK;
+}
+
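
Editor's note: the loop above stamps the client-side write count into the timestamp and shifts every server-side position that carries a valid time by the restore offset. A simplified standalone model of that pass (the struct below is an illustrative stand-in, not the real ExtendedTimestamp):

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in: a time and a position per location,
    // with a negative time marking "no valid timestamp".
    struct TsSketch {
        enum { kLocations = 3 };
        int64_t mTimeNs[kLocations];
        int64_t mPosition[kLocations];
    };

    int main() {
        TsSketch ts = {{-1, 100, 200}, {0, 1000, 990}};
        const int64_t serverOffset = 480; // hypothetical frames written before a restore
        bool found = false;
        for (int i = 0; i < TsSketch::kLocations; ++i) {
            if (ts.mTimeNs[i] >= 0) {     // only locations with a valid time
                ts.mPosition[i] += serverOffset;
                found = true;
            }
        }
        printf("found=%d adjusted pos[1]=%lld\n", found, (long long)ts.mPosition[1]);
        return 0;
    }
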
 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
 {
     AutoMutex lock(mLock);
@@ -2205,11 +2326,6 @@
     // Set false here to cover all the error return cases.
     mPreviousTimestampValid = false;
 
-    // FIXME not implemented for fast tracks; should use proxy and SSQ
-    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
-        return INVALID_OPERATION;
-    }
-
     switch (mState) {
     case STATE_ACTIVE:
     case STATE_PAUSED:
@@ -2239,7 +2355,60 @@
 
     // The presented frame count must always lag behind the consumed frame count.
     // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
-    status_t status = mAudioTrack->getTimestamp(timestamp);
+
+    status_t status;
+    if (isOffloadedOrDirect_l()) {
+        // use Binder to get timestamp
+        status = mAudioTrack->getTimestamp(timestamp);
+    } else {
+        // read timestamp from shared memory
+        ExtendedTimestamp ets;
+        status = mProxy->getTimestamp(&ets);
+        if (status == OK) {
+            ExtendedTimestamp::Location location;
+            status = ets.getBestTimestamp(&timestamp, &location);
+
+            if (status == OK) {
+                // It is possible that the best location has moved from the kernel to the server.
+                // In this case we adjust the position from the previous computed latency.
+                if (location == ExtendedTimestamp::LOCATION_SERVER) {
+                    ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
+                            "getTimestamp() location moved from kernel to server");
+                    // check that the last kernel OK time info exists and the positions
+                    // are valid (if they predate the current track, the positions may
+                    // be zero or negative).
+                    const int64_t frames =
+                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
+                            ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
+                            ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
+                            ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
+                            ?
+                            int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
+                                    / 1000)
+                            :
+                            (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
+                            - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
+                    ALOGV("frame adjustment:%lld  timestamp:%s",
+                            (long long)frames, ets.toString().c_str());
+                    if (frames >= ets.mPosition[location]) {
+                        timestamp.mPosition = 0;
+                    } else {
+                        timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
+                    }
+                } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
+                    ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
+                            "getTimestamp() location moved from server to kernel");
+                }
+                mPreviousLocation = location;
+            } else {
+                // right after AudioTrack is started, one may not find a timestamp
+                ALOGV("getBestTimestamp did not find timestamp");
+            }
+        }
+        if (status == INVALID_OPERATION) {
+            status = WOULD_BLOCK;
+        }
+    }
     if (status != NO_ERROR) {
         ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
         return status;
@@ -2309,15 +2478,19 @@
         // If this delta between these is greater than the client position, it means that
         // actually presented is still stuck at the starting line (figuratively speaking),
         // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
-        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
+        // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
+        // mPosition exceeds 32 bits.
+        // TODO Remove when timestamp is updated to contain pipeline status info.
+        const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
+        if (pipelineDepthInFrames > 0 /* should be true, but we check anyway */
+                && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
             return INVALID_OPERATION;
         }
         // Convert timestamp position from server time base to client time base.
         // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
         // But if we change it to 64-bit then this could fail.
-        // If (mPosition - mServer) can be negative then should use:
-        //   (int32_t)(mPosition - mServer)
-        timestamp.mPosition += mPosition - mServer;
+        // Use Modulo computation here.
+        timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
         // Immediately after a call to getPosition_l(), mPosition and
         // mServer both represent the same frame position.  mPosition is
         // in client's point of view, and mServer is in server's point of
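
Editor's note: the Modulo expression above converts a server-base position into the client's base purely with wrap-safe unsigned arithmetic. A standalone sketch with values chosen near the 32-bit wrap point:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t mPosition = 100;         // client-base frames (post-flush view)
        uint32_t mServer   = 0xFFFFFF00u; // server-base frames, near wraparound
        uint32_t tsPos     = 0xFFFFFEC0u; // server-base timestamp, 64 frames behind
        // Unsigned wraparound is well defined; client sees 100 - 64 = 36.
        uint32_t clientTs  = mPosition - mServer + tsPos;
        printf("client-base timestamp position: %u\n", clientTs);
        return 0;
    }
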
@@ -2331,9 +2504,9 @@
     // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
     if (status == NO_ERROR) {
         if (previousTimestampValid) {
-#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * 1000000000 + time.tv_nsec)
-            const uint64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
-            const uint64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
+#define TIME_TO_NANOS(time) ((int64_t)time.tv_sec * 1000000000 + time.tv_nsec)
+            const int64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
+            const int64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
 #undef TIME_TO_NANOS
             if (currentTimeNanos < previousTimeNanos) {
                 ALOGW("retrograde timestamp time");
@@ -2342,8 +2515,8 @@
 
             // Looking at signed delta will work even when the timestamps
             // are wrapping around.
-            int32_t deltaPosition = static_cast<int32_t>(timestamp.mPosition
-                    - mPreviousTimestamp.mPosition);
+            int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
+                    - mPreviousTimestamp.mPosition).signedValue();
             // position can bobble slightly as an artifact; this hides the bobble
             static const int32_t MINIMUM_POSITION_DELTA = 8;
             if (deltaPosition < 0) {
@@ -2421,6 +2594,17 @@
     return NO_ERROR;
 }
 
+uint32_t AudioTrack::getUnderrunCount() const
+{
+    AutoMutex lock(mLock);
+    return getUnderrunCount_l();
+}
+
+uint32_t AudioTrack::getUnderrunCount_l() const
+{
+    return mProxy->getUnderrunCount() + mUnderrunCountOffset;
+}
+
 uint32_t AudioTrack::getUnderrunFrames() const
 {
     AutoMutex lock(mLock);
@@ -2469,6 +2653,56 @@
     return NO_ERROR;
 }
 
+status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
+{
+    if (msec == nullptr ||
+            (location != ExtendedTimestamp::LOCATION_SERVER
+                    && location != ExtendedTimestamp::LOCATION_KERNEL)) {
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    // inclusive of offloaded and direct tracks.
+    //
+    // It would be possible to allow duration computation for non-PCM
+    // audio_has_proportional_frames() formats, because they currently drain
+    // at a rate equivalent to the PCM sample rate * frame size, but this is
+    // not enabled.
+    if (!isPurePcmData_l()) {
+        return INVALID_OPERATION;
+    }
+    ExtendedTimestamp ets;
+    if (getTimestamp_l(&ets) == OK
+            && ets.mTimeNs[location] > 0) {
+        int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
+                - ets.mPosition[location];
+        if (diff < 0) {
+            *msec = 0;
+        } else {
+            // ms is the playback time of the remaining frames
+            int64_t ms = (int64_t)((double)diff * 1000 /
+                    ((double)mSampleRate * mPlaybackRate.mSpeed));
+            // clockdiff is the timestamp age (negative)
+            int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
+                    ets.mTimeNs[location]
+                    + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
+                    - systemTime(SYSTEM_TIME_MONOTONIC);
+
+            //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
+            static const int NANOS_PER_MILLIS = 1000000;
+            *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
+        }
+        return NO_ERROR;
+    }
+    if (location != ExtendedTimestamp::LOCATION_SERVER) {
+        return INVALID_OPERATION; // LOCATION_KERNEL is not available
+    }
+    // use server position directly (offloaded and direct arrive here)
+    updateAndGetPosition_l();
+    int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
+    *msec = (diff <= 0) ? 0
+            : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
+    return NO_ERROR;
+}
+
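
Editor's note: pendingDuration() converts the frame gap between what the client wrote and what the queried location consumed into milliseconds, then subtracts the age of the timestamp. A worked standalone example with hypothetical numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t clientFrames = 10000;  // LOCATION_CLIENT position
        const int64_t serverFrames = 5200;   // position at the queried location
        const double  sampleRate   = 48000.0;
        const double  speed        = 1.0;
        const int64_t diff = clientFrames - serverFrames;  // 4800 frames pending
        const int64_t ms = (int64_t)((double)diff * 1000 / (sampleRate * speed));
        const int64_t clockdiffNs = -20000000;             // timestamp is 20 ms old
        printf("pending: %lld ms\n", (long long)(ms + clockdiffNs / 1000000)); // 80
        return 0;
    }
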
 // =========================================================================
 
 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index caa84fb..7119517 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -38,7 +38,7 @@
 // In general, this means (new_self) returned is max(self, other) + 1.
 
 static uint32_t incrementSequence(uint32_t self, uint32_t other) {
-    int32_t diff = self - other;
+    int32_t diff = (int32_t) self - (int32_t) other;
     if (diff >= 0 && diff < INT32_MAX) {
         return self + 1; // we're already ahead of other.
     }
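
Editor's note: the explicit casts keep the sequence comparison well defined (and quiet under the integer sanitizer) while preserving the wrap-safe signed-difference ordering. A standalone sketch of the same idiom:

    #include <cstdint>
    #include <cstdio>

    // Compare sequence counters via a signed difference so the ordering
    // survives 32-bit wraparound.
    static uint32_t nextSeq(uint32_t self, uint32_t other) {
        int32_t diff = (int32_t)self - (int32_t)other;
        if (diff >= 0 && diff < INT32_MAX) {
            return self + 1;  // already ahead of other
        }
        return other + 1;
    }

    int main() {
        printf("%u\n", nextSeq(5, 3));            // 6: already ahead
        printf("%u\n", nextSeq(3, 0xFFFFFFF0u));  // 4: 3 is "ahead" across the wrap
        return 0;
    }
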
@@ -46,8 +46,10 @@
 }
 
 audio_track_cblk_t::audio_track_cblk_t()
-    : mServer(0), mFutex(0), mMinimum(0),
-    mVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY), mSampleRate(0), mSendLevel(0), mFlags(0)
+    : mServer(0), mFutex(0), mMinimum(0)
+    , mVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY), mSampleRate(0), mSendLevel(0)
+    , mBufferSizeInFrames(0)
+    , mFlags(0)
 {
     memset(&u, 0, sizeof(u));
 }
@@ -66,8 +68,11 @@
 
 ClientProxy::ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
         size_t frameSize, bool isOut, bool clientInServer)
-    : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer), mEpoch(0)
+    : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer)
+    , mEpoch(0)
+    , mTimestampObserver(&cblk->mExtendedTimestampQueue)
 {
+    setBufferSizeInFrames(frameCount);
 }
 
 const struct timespec ClientProxy::kForever = {INT_MAX /*tv_sec*/, 0 /*tv_nsec*/};
@@ -81,6 +86,28 @@
 // order of minutes.
 #define MAX_SEC    5
 
+uint32_t ClientProxy::setBufferSizeInFrames(uint32_t size)
+{
+    // The minimum should be greater than zero and less than the size
+    // at which underruns will occur.
+    const uint32_t minimum = 16; // based on AudioMixer::BLOCKSIZE
+    const uint32_t maximum = frameCount();
+    uint32_t clippedSize = size;
+    if (maximum < minimum) {
+        clippedSize = maximum;
+    } else if (clippedSize < minimum) {
+        clippedSize = minimum;
+    } else if (clippedSize > maximum) {
+        clippedSize = maximum;
+    }
+    // for server to read
+    android_atomic_release_store(clippedSize, (int32_t *)&mCblk->mBufferSizeInFrames);
+    // for client to read
+    mBufferSizeInFrames = clippedSize;
+    return clippedSize;
+}
+
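
Editor's note: setBufferSizeInFrames() clamps the requested size into [minimum, maximum], with the track's frame count winning if it is smaller than the minimum. A standalone sketch of that clamp:

    #include <cstdint>
    #include <cstdio>

    static uint32_t clampSize(uint32_t size, uint32_t minimum, uint32_t maximum) {
        if (maximum < minimum) return maximum; // tiny track buffer wins
        if (size < minimum)    return minimum;
        if (size > maximum)    return maximum;
        return size;
    }

    int main() {
        printf("%u\n", clampSize(4, 16, 1024));    // 16
        printf("%u\n", clampSize(4096, 16, 1024)); // 1024
        return 0;
    }
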
+__attribute__((no_sanitize("integer")))
 status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,
         struct timespec *elapsed)
 {
@@ -126,6 +153,11 @@
             status = DEAD_OBJECT;
             goto end;
         }
+        if (flags & CBLK_DISABLED) {
+            ALOGV("Track disabled");
+            status = NOT_ENOUGH_DATA;
+            goto end;
+        }
         // check for obtainBuffer interrupted by client
         if (!ignoreInitialPendingInterrupt && (flags & CBLK_INTERRUPT)) {
             ALOGV("obtainBuffer() interrupted by client");
@@ -151,6 +183,7 @@
             rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
             front = cblk->u.mStreaming.mFront;
         }
+        // write to rear, read from front
         ssize_t filled = rear - front;
         // pipe should not be overfull
         if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
@@ -166,9 +199,15 @@
             cblk->u.mStreaming.mFront = rear;
             (void) android_atomic_or(CBLK_OVERRUN, &cblk->mFlags);
         }
-        // don't allow filling pipe beyond the nominal size
-        size_t avail = mIsOut ? mFrameCount - filled : filled;
-        if (avail > 0) {
+        // Don't allow filling the pipe beyond the user-settable size.
+        // The calculation for avail can go negative if the buffer size
+        // is suddenly dropped below the amount already in the buffer.
+        // So use a signed calculation to prevent a numeric overflow abort.
+        ssize_t adjustableSize = (ssize_t) getBufferSizeInFrames();
+        ssize_t avail =  (mIsOut) ? adjustableSize - filled : filled;
+        if (avail < 0) {
+            avail = 0;
+        } else if (avail > 0) {
             // 'avail' may be non-contiguous, so return only the first contiguous chunk
             size_t part1;
             if (mIsOut) {
@@ -178,7 +217,7 @@
                 front &= mFrameCountP2 - 1;
                 part1 = mFrameCountP2 - front;
             }
-            if (part1 > avail) {
+            if (part1 > (size_t)avail) {
                 part1 = avail;
             }
             if (part1 > buffer->mFrameCount) {
@@ -240,6 +279,7 @@
             errno = 0;
             (void) syscall(__NR_futex, &cblk->mFutex,
                     mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
+            status_t error = errno; // clock_gettime can affect errno
             // update total elapsed time spent waiting
             if (measure) {
                 struct timespec after;
@@ -257,7 +297,7 @@
                 before = after;
                 beforeIsValid = true;
             }
-            switch (errno) {
+            switch (error) {
             case 0:            // normal wakeup by server, or by binderDied()
             case EWOULDBLOCK:  // benign race condition with server
             case EINTR:        // wait was interrupted by signal or other spurious wakeup
@@ -265,7 +305,7 @@
                 // FIXME these error/non-0 status are being dropped
                 break;
             default:
-                status = errno;
+                status = error;
                 ALOGE("%s unexpected error %s", __func__, strerror(status));
                 goto end;
             }
@@ -293,6 +333,7 @@
     return status;
 }
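
Editor's note: the errno snapshot taken immediately after the futex syscall matters because the clock_gettime() used for elapsed-time accounting may overwrite errno before the switch examines it. A standalone sketch of the pattern (the EINTR value is just for illustration):

    #include <cerrno>
    #include <cstdio>
    #include <ctime>

    int main() {
        errno = EINTR;                        // pretend the wait was interrupted
        int error = errno;                    // snapshot first...
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now); // ...because this may touch errno
        printf("saved error: %d, errno now: %d\n", error, errno);
        return 0;
    }
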
 
+__attribute__((no_sanitize("integer")))
 void ClientProxy::releaseBuffer(Buffer* buffer)
 {
     LOG_ALWAYS_FATAL_IF(buffer == NULL);
@@ -338,6 +379,7 @@
     }
 }
 
+__attribute__((no_sanitize("integer")))
 size_t ClientProxy::getMisalignment()
 {
     audio_track_cblk_t* cblk = mCblk;
@@ -345,27 +387,6 @@
             (mFrameCountP2 - 1);
 }
 
-size_t ClientProxy::getFramesFilled() {
-    audio_track_cblk_t* cblk = mCblk;
-    int32_t front;
-    int32_t rear;
-
-    if (mIsOut) {
-        front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
-        rear = cblk->u.mStreaming.mRear;
-    } else {
-        rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
-        front = cblk->u.mStreaming.mFront;
-    }
-    ssize_t filled = rear - front;
-    // pipe should not be overfull
-    if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
-        ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
-        return 0;
-    }
-    return (size_t)filled;
-}
-
 // ---------------------------------------------------------------------------
 
 void AudioTrackClientProxy::flush()
@@ -420,7 +441,8 @@
             status = DEAD_OBJECT;
             goto end;
         }
-        if (flags & CBLK_STREAM_END_DONE) {
+        // a track is not supposed to underrun at this stage but consider it done
+        if (flags & (CBLK_STREAM_END_DONE | CBLK_DISABLED)) {
             ALOGV("stream end received");
             status = NO_ERROR;
             goto end;
@@ -593,10 +615,13 @@
 ServerProxy::ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
         size_t frameSize, bool isOut, bool clientInServer)
     : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer),
-      mAvailToClient(0), mFlush(0)
+      mAvailToClient(0), mFlush(0), mReleased(0), mFlushed(0)
+    , mTimestampMutator(&cblk->mExtendedTimestampQueue)
 {
+    cblk->mBufferSizeInFrames = frameCount;
 }
 
+__attribute__((no_sanitize("integer")))
 status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
 {
     LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
@@ -646,6 +671,7 @@
                             mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
                 }
             }
+            mFlushed += (newFront - front) & mask;
             front = newFront;
         }
     } else {
@@ -706,6 +732,7 @@
     return NO_INIT;
 }
 
+__attribute__((no_sanitize("integer")))
 void ServerProxy::releaseBuffer(Buffer* buffer)
 {
     LOG_ALWAYS_FATAL_IF(buffer == NULL);
@@ -729,6 +756,7 @@
     }
 
     cblk->mServer += stepCount;
+    mReleased += stepCount;
 
     size_t half = mFrameCount / 2;
     if (half == 0) {
@@ -757,6 +785,7 @@
 
 // ---------------------------------------------------------------------------
 
+__attribute__((no_sanitize("integer")))
 size_t AudioTrackServerProxy::framesReady()
 {
     LOG_ALWAYS_FATAL_IF(!mIsOut);
@@ -800,10 +829,25 @@
 void AudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount)
 {
     audio_track_cblk_t* cblk = mCblk;
-    cblk->u.mStreaming.mUnderrunFrames += frameCount;
+    if (frameCount > 0) {
+        cblk->u.mStreaming.mUnderrunFrames += frameCount;
 
-    // FIXME also wake futex so that underrun is noticed more quickly
-    (void) android_atomic_or(CBLK_UNDERRUN, &cblk->mFlags);
+        if (!mUnderrunning) { // start of underrun?
+            mUnderrunCount++;
+            cblk->u.mStreaming.mUnderrunCount = mUnderrunCount;
+            mUnderrunning = true;
+            ALOGV("tallyUnderrunFrames(%3u) at uf = %u, bump mUnderrunCount = %u",
+                frameCount, cblk->u.mStreaming.mUnderrunFrames, mUnderrunCount);
+        }
+
+        // FIXME also wake futex so that underrun is noticed more quickly
+        (void) android_atomic_or(CBLK_UNDERRUN, &cblk->mFlags);
+    } else {
+        ALOGV_IF(mUnderrunning,
+            "tallyUnderrunFrames(%3u) at uf = %u, underrun finished",
+            frameCount, cblk->u.mStreaming.mUnderrunFrames);
+        mUnderrunning = false; // so we can detect the next edge
+    }
 }
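
Editor's note: tallyUnderrunFrames() now counts underrun events, not only underrun frames, by detecting the zero-to-nonzero edge of the per-mix tallies. A standalone sketch of that edge detection with illustrative tallies:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t tallies[] = {0, 0, 96, 96, 0, 48, 0}; // hypothetical per-mix counts
        uint32_t underrunCount = 0;
        bool underrunning = false;
        for (uint32_t frames : tallies) {
            if (frames > 0) {
                if (!underrunning) {      // start of an underrun run
                    underrunCount++;
                    underrunning = true;
                }
            } else {
                underrunning = false;     // arm detection of the next edge
            }
        }
        printf("underrun events: %u\n", underrunCount); // 2
        return 0;
    }
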
 
 AudioPlaybackRate AudioTrackServerProxy::getPlaybackRate()
@@ -893,7 +937,7 @@
     if (mObserver.poll(state)) {
         StaticAudioTrackState trystate = mState;
         bool result;
-        const int32_t diffSeq = state.mLoopSequence - state.mPositionSequence;
+        const int32_t diffSeq = (int32_t) state.mLoopSequence - (int32_t) state.mPositionSequence;
 
         if (diffSeq < 0) {
             result = updateStateWithLoop(&trystate, state) == OK &&
@@ -1014,6 +1058,8 @@
     mFramesReadySafe = clampToSize(mFramesReady);
 
     cblk->mServer += stepCount;
+    mReleased += stepCount;
+
     // This may overflow, but client is not supposed to rely on it
     StaticAudioTrackPosLoop posLoop;
     posLoop.mBufferPosition = mState.mPosition;
@@ -1029,7 +1075,7 @@
     buffer->mNonContig = 0;
 }
 
-void StaticAudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount __unused)
+void StaticAudioTrackServerProxy::tallyUnderrunFrames(uint32_t frameCount)
 {
     // Unlike AudioTrackServerProxy::tallyUnderrunFrames() used for streaming tracks,
     // we don't have a location to count underrun frames.  The underrun frame counter
@@ -1037,7 +1083,9 @@
     // possible for static buffer tracks other than at end of buffer, so this is not a loss.
 
     // FIXME also wake futex so that underrun is noticed more quickly
-    (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->mFlags);
+    if (frameCount > 0) {
+        (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->mFlags);
+    }
 }
 
 // ---------------------------------------------------------------------------
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 0bf503a..92e65e4 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -81,7 +81,8 @@
     LIST_AUDIO_PATCHES,
     SET_AUDIO_PORT_CONFIG,
     GET_AUDIO_HW_SYNC,
-    SYSTEM_READY
+    SYSTEM_READY,
+    FRAME_COUNT_HAL,
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -103,8 +104,9 @@
                                 track_flags_t *flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
+                                pid_t pid,
                                 pid_t tid,
-                                int *sessionId,
+                                audio_session_t *sessionId,
                                 int clientUid,
                                 status_t *status)
     {
@@ -127,8 +129,9 @@
             data.writeInt32(false);
         }
         data.writeInt32((int32_t) output);
+        data.writeInt32((int32_t) pid);
         data.writeInt32((int32_t) tid);
-        int lSessionId = AUDIO_SESSION_ALLOCATE;
+        audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
         if (sessionId != NULL) {
             lSessionId = *sessionId;
         }
@@ -146,7 +149,7 @@
             if (flags != NULL) {
                 *flags = lFlags;
             }
-            lSessionId = reply.readInt32();
+            lSessionId = (audio_session_t) reply.readInt32();
             if (sessionId != NULL) {
                 *sessionId = lSessionId;
             }
@@ -178,9 +181,10 @@
                                 const String16& opPackageName,
                                 size_t *pFrameCount,
                                 track_flags_t *flags,
+                                pid_t pid,
                                 pid_t tid,
                                 int clientUid,
-                                int *sessionId,
+                                audio_session_t *sessionId,
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
                                 sp<IMemory>& buffers,
@@ -198,9 +202,10 @@
         data.writeInt64(frameCount);
         track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
         data.writeInt32(lFlags);
+        data.writeInt32((int32_t) pid);
         data.writeInt32((int32_t) tid);
         data.writeInt32((int32_t) clientUid);
-        int lSessionId = AUDIO_SESSION_ALLOCATE;
+        audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
         if (sessionId != NULL) {
             lSessionId = *sessionId;
         }
@@ -220,7 +225,7 @@
             if (flags != NULL) {
                 *flags = lFlags;
             }
-            lSessionId = reply.readInt32();
+            lSessionId = (audio_session_t) reply.readInt32();
             if (sessionId != NULL) {
                 *sessionId = lSessionId;
             }
@@ -265,15 +270,17 @@
         return record;
     }
 
-    virtual uint32_t sampleRate(audio_io_handle_t output) const
+    virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) output);
+        data.writeInt32((int32_t) ioHandle);
         remote()->transact(SAMPLE_RATE, data, &reply);
         return reply.readInt32();
     }
 
+    // RESERVED for channelCount()
+
     virtual audio_format_t format(audio_io_handle_t output) const
     {
         Parcel data, reply;
@@ -283,11 +290,11 @@
         return (audio_format_t) reply.readInt32();
     }
 
-    virtual size_t frameCount(audio_io_handle_t output) const
+    virtual size_t frameCount(audio_io_handle_t ioHandle) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) output);
+        data.writeInt32((int32_t) ioHandle);
         remote()->transact(FRAME_COUNT, data, &reply);
         return reply.readInt64();
     }
@@ -612,10 +619,11 @@
         return (uint32_t) reply.readInt32();
     }
 
-    virtual audio_unique_id_t newAudioUniqueId()
+    virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+        data.writeInt32((int32_t) use);
         status_t status = remote()->transact(NEW_AUDIO_SESSION_ID, data, &reply);
         audio_unique_id_t id = AUDIO_SESSION_ALLOCATE;
         if (status == NO_ERROR) {
@@ -624,7 +632,7 @@
         return id;
     }
 
-    virtual void acquireAudioSessionId(int audioSession, int pid)
+    virtual void acquireAudioSessionId(audio_session_t audioSession, int pid)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -633,7 +641,7 @@
         remote()->transact(ACQUIRE_AUDIO_SESSION_ID, data, &reply);
     }
 
-    virtual void releaseAudioSessionId(int audioSession, int pid)
+    virtual void releaseAudioSessionId(audio_session_t audioSession, int pid)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -706,7 +714,7 @@
                                     const sp<IEffectClient>& client,
                                     int32_t priority,
                                     audio_io_handle_t output,
-                                    int sessionId,
+                                    audio_session_t sessionId,
                                     const String16& opPackageName,
                                     status_t *status,
                                     int *id,
@@ -753,7 +761,7 @@
         return effect;
     }
 
-    virtual status_t moveEffects(int session, audio_io_handle_t srcOutput,
+    virtual status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
             audio_io_handle_t dstOutput)
     {
         Parcel data, reply;
@@ -910,6 +918,18 @@
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         return remote()->transact(SYSTEM_READY, data, &reply, IBinder::FLAG_ONEWAY);
     }
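+    // Returns the frame count of the HAL buffer for the given IO handle,
+    // or 0 if the binder transaction fails.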
+    virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+        data.writeInt32((int32_t) ioHandle);
+        status_t status = remote()->transact(FRAME_COUNT_HAL, data, &reply);
+        if (status != NO_ERROR) {
+            return 0;
+        }
+        return reply.readInt64();
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -934,8 +954,9 @@
                 buffer = interface_cast<IMemory>(data.readStrongBinder());
             }
             audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
+            pid_t pid = (pid_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
-            int sessionId = data.readInt32();
+            audio_session_t sessionId = (audio_session_t) data.readInt32();
             int clientUid = data.readInt32();
             status_t status = NO_ERROR;
             sp<IAudioTrack> track;
@@ -946,7 +967,7 @@
             } else {
                 track = createTrack(
                         (audio_stream_type_t) streamType, sampleRate, format,
-                        channelMask, &frameCount, &flags, buffer, output, tid,
+                        channelMask, &frameCount, &flags, buffer, output, pid, tid,
                         &sessionId, clientUid, &status);
                 LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR));
             }
@@ -966,16 +987,18 @@
             const String16& opPackageName = data.readString16();
             size_t frameCount = data.readInt64();
             track_flags_t flags = (track_flags_t) data.readInt32();
+            pid_t pid = (pid_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
             int clientUid = data.readInt32();
-            int sessionId = data.readInt32();
+            audio_session_t sessionId = (audio_session_t) data.readInt32();
             size_t notificationFrames = data.readInt64();
             sp<IMemory> cblk;
             sp<IMemory> buffers;
             status_t status = NO_ERROR;
             sp<IAudioRecord> record = openRecord(input,
-                    sampleRate, format, channelMask, opPackageName, &frameCount, &flags, tid,
-                    clientUid, &sessionId, &notificationFrames, cblk, buffers, &status);
+                    sampleRate, format, channelMask, opPackageName, &frameCount, &flags,
+                    pid, tid, clientUid, &sessionId, &notificationFrames, cblk, buffers,
+                    &status);
             LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
             reply->writeInt64(frameCount);
             reply->writeInt32(flags);
@@ -992,6 +1015,9 @@
             reply->writeInt32( sampleRate((audio_io_handle_t) data.readInt32()) );
             return NO_ERROR;
         } break;
+
+        // RESERVED for channelCount()
+
         case FORMAT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             reply->writeInt32( format((audio_io_handle_t) data.readInt32()) );
@@ -1208,19 +1234,19 @@
         } break;
         case NEW_AUDIO_SESSION_ID: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(newAudioUniqueId());
+            reply->writeInt32(newAudioUniqueId((audio_unique_id_use_t) data.readInt32()));
             return NO_ERROR;
         } break;
         case ACQUIRE_AUDIO_SESSION_ID: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int audioSession = data.readInt32();
+            audio_session_t audioSession = (audio_session_t) data.readInt32();
             int pid = data.readInt32();
             acquireAudioSessionId(audioSession, pid);
             return NO_ERROR;
         } break;
         case RELEASE_AUDIO_SESSION_ID: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int audioSession = data.readInt32();
+            audio_session_t audioSession = (audio_session_t) data.readInt32();
             int pid = data.readInt32();
             releaseAudioSessionId(audioSession, pid);
             return NO_ERROR;
@@ -1266,7 +1292,7 @@
             sp<IEffectClient> client = interface_cast<IEffectClient>(data.readStrongBinder());
             int32_t priority = data.readInt32();
             audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
-            int sessionId = data.readInt32();
+            audio_session_t sessionId = (audio_session_t) data.readInt32();
             const String16 opPackageName = data.readString16();
             status_t status = NO_ERROR;
             int id = 0;
@@ -1283,7 +1309,7 @@
         } break;
         case MOVE_EFFECTS: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int session = data.readInt32();
+            audio_session_t session = (audio_session_t) data.readInt32();
             audio_io_handle_t srcOutput = (audio_io_handle_t) data.readInt32();
             audio_io_handle_t dstOutput = (audio_io_handle_t) data.readInt32();
             reply->writeInt32(moveEffects(session, srcOutput, dstOutput));
@@ -1354,7 +1380,7 @@
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             struct audio_patch patch;
             data.read(&patch, sizeof(struct audio_patch));
-            audio_patch_handle_t handle = {};
+            audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
             if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
                 ALOGE("b/23905951");
             }
@@ -1410,7 +1436,7 @@
         } break;
         case GET_AUDIO_HW_SYNC: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(getAudioHwSyncForSession((audio_session_t)data.readInt32()));
+            reply->writeInt32(getAudioHwSyncForSession((audio_session_t) data.readInt32()));
             return NO_ERROR;
         } break;
         case SYSTEM_READY: {
@@ -1418,6 +1444,11 @@
             systemReady();
             return NO_ERROR;
         } break;
+        case FRAME_COUNT_HAL: {
+            CHECK_INTERFACE(IAudioFlinger, data, reply);
+            reply->writeInt64( frameCountHAL((audio_io_handle_t) data.readInt32()) );
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 3429d36..8dca9e9 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -50,6 +50,7 @@
         data.writeInt32(ioDesc->mFormat);
         data.writeInt32(ioDesc->mChannelMask);
         data.writeInt64(ioDesc->mFrameCount);
+        data.writeInt64(ioDesc->mFrameCountHAL);
         data.writeInt32(ioDesc->mLatency);
         remote()->transact(IO_CONFIG_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
     }
@@ -73,6 +74,7 @@
             ioDesc->mFormat = (audio_format_t) data.readInt32();
             ioDesc->mChannelMask = (audio_channel_mask_t) data.readInt32();
             ioDesc->mFrameCount = data.readInt64();
+            ioDesc->mFrameCountHAL = data.readInt64();
             ioDesc->mLatency = data.readInt32();
             ioConfigChanged(event, ioDesc);
             return NO_ERROR;
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 76b5924..6405d6d 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -74,6 +74,8 @@
     START_AUDIO_SOURCE,
     STOP_AUDIO_SOURCE,
     SET_AUDIO_PORT_CALLBACK_ENABLED,
+    SET_MASTER_MONO,
+    GET_MASTER_MONO,
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -245,7 +247,7 @@
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(output);
         data.writeInt32((int32_t) stream);
-        data.writeInt32((int32_t)session);
+        data.writeInt32((int32_t) session);
         remote()->transact(START_OUTPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
@@ -258,7 +260,7 @@
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(output);
         data.writeInt32((int32_t) stream);
-        data.writeInt32((int32_t)session);
+        data.writeInt32((int32_t) session);
         remote()->transact(STOP_OUTPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
@@ -278,6 +280,7 @@
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
+                                     pid_t pid,
                                      uid_t uid,
                                      uint32_t samplingRate,
                                      audio_format_t format,
@@ -297,6 +300,7 @@
         }
         data.write(attr, sizeof(audio_attributes_t));
         data.writeInt32(session);
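+        // pid is a new field, written between session and uid; the Bn side
+        // reads it in the same position.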
+        data.writeInt32(pid);
         data.writeInt32(uid);
         data.writeInt32(samplingRate);
         data.writeInt32(static_cast <uint32_t>(format));
@@ -418,7 +422,7 @@
     virtual status_t registerEffect(const effect_descriptor_t *desc,
                                         audio_io_handle_t io,
                                         uint32_t strategy,
-                                        int session,
+                                        audio_session_t session,
                                         int id)
     {
         Parcel data, reply;
@@ -480,7 +484,7 @@
         return reply.readInt32();
     }
 
-    virtual status_t queryDefaultPreProcessing(int audioSession,
+    virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
                                                effect_descriptor_t *descriptors,
                                                uint32_t *count)
     {
@@ -767,6 +771,37 @@
         status = (status_t)reply.readInt32();
         return status;
     }
+
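+    // Master mono down-mix control. Both calls propagate the transaction
+    // status; getMasterMono() only reads the value back on NO_ERROR.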
+    virtual status_t setMasterMono(bool mono)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(static_cast<int32_t>(mono));
+        status_t status = remote()->transact(SET_MASTER_MONO, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        return static_cast<status_t>(reply.readInt32());
+    }
+
+    virtual status_t getMasterMono(bool *mono)
+    {
+        if (mono == nullptr) {
+            return BAD_VALUE;
+        }
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+
+        status_t status = remote()->transact(GET_MASTER_MONO, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = static_cast<status_t>(reply.readInt32());
+        if (status == NO_ERROR) {
+            *mono = static_cast<bool>(reply.readInt32());
+        }
+        return status;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -926,14 +961,15 @@
             audio_attributes_t attr;
             data.read(&attr, sizeof(audio_attributes_t));
             audio_session_t session = (audio_session_t)data.readInt32();
+            pid_t pid = (pid_t)data.readInt32();
             uid_t uid = (uid_t)data.readInt32();
             uint32_t samplingRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
             audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
             audio_port_handle_t selectedDeviceId = (audio_port_handle_t) data.readInt32();
-            audio_io_handle_t input = {};
-            status_t status = getInputForAttr(&attr, &input, session, uid,
+            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+            status_t status = getInputForAttr(&attr, &input, session, pid, uid,
                                               samplingRate, format, channelMask,
                                               flags, selectedDeviceId);
             reply->writeInt32(status);
@@ -1032,7 +1068,7 @@
             data.read(&desc, sizeof(effect_descriptor_t));
             audio_io_handle_t io = data.readInt32();
             uint32_t strategy = data.readInt32();
-            int session = data.readInt32();
+            audio_session_t session = (audio_session_t) data.readInt32();
             int id = data.readInt32();
             reply->writeInt32(static_cast <int32_t>(registerEffect(&desc,
                                                                    io,
@@ -1082,7 +1118,7 @@
 
         case QUERY_DEFAULT_PRE_PROCESSING: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            int audioSession = data.readInt32();
+            audio_session_t audioSession = (audio_session_t) data.readInt32();
             uint32_t count = data.readInt32();
             if (count > AudioEffect::kMaxPreProcessing) {
                 count = AudioEffect::kMaxPreProcessing;
@@ -1164,7 +1200,7 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             struct audio_patch patch;
             data.read(&patch, sizeof(struct audio_patch));
-            audio_patch_handle_t handle = {};
+            audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
             if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
                 ALOGE("b/23912202");
             }
@@ -1242,9 +1278,9 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
                     data.readStrongBinder());
-            audio_session_t session = {};
-            audio_io_handle_t ioHandle = {};
-            audio_devices_t device = {};
+            audio_session_t session = AUDIO_SESSION_NONE;
+            audio_io_handle_t ioHandle = AUDIO_IO_HANDLE_NONE;
+            audio_devices_t device = AUDIO_DEVICE_NONE;
             status_t status = acquireSoundTriggerSession(&session, &ioHandle, &device);
             reply->writeInt32(status);
             if (status == NO_ERROR) {
@@ -1311,6 +1347,25 @@
             return NO_ERROR;
         } break;
 
+        case SET_MASTER_MONO: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            bool mono = static_cast<bool>(data.readInt32());
+            status_t status = setMasterMono(mono);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        } break;
+
+        case GET_MASTER_MONO: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            bool mono;
+            status_t status = getMasterMono(&mono);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeInt32(static_cast<int32_t>(mono));
+            }
+            return NO_ERROR;
+        } break;
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IAudioPolicyServiceClient.cpp b/media/libmedia/IAudioPolicyServiceClient.cpp
index 65cc7d6..5f931e5 100644
--- a/media/libmedia/IAudioPolicyServiceClient.cpp
+++ b/media/libmedia/IAudioPolicyServiceClient.cpp
@@ -30,9 +30,25 @@
 enum {
     PORT_LIST_UPDATE = IBinder::FIRST_CALL_TRANSACTION,
     PATCH_LIST_UPDATE,
-    MIX_STATE_UPDATE
+    MIX_STATE_UPDATE,
+    RECORDING_CONFIGURATION_UPDATE
 };
 
+// ----------------------------------------------------------------------
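+// Helpers to (de)serialize audio_config_base_t. The read and write sides
+// must use the same field order: sample_rate, channel_mask, format.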
+inline void readAudioConfigBaseFromParcel(const Parcel& data, audio_config_base_t *config) {
+    config->sample_rate = data.readUint32();
+    config->channel_mask = (audio_channel_mask_t) data.readInt32();
+    config->format = (audio_format_t) data.readInt32();
+}
+
+inline void writeAudioConfigBaseToParcel(Parcel& data, const audio_config_base_t *config)
+{
+    data.writeUint32(config->sample_rate);
+    data.writeInt32((int32_t) config->channel_mask);
+    data.writeInt32((int32_t) config->format);
+}
+
+// ----------------------------------------------------------------------
 class BpAudioPolicyServiceClient : public BpInterface<IAudioPolicyServiceClient>
 {
 public:
@@ -63,6 +79,20 @@
         data.writeInt32(state);
         remote()->transact(MIX_STATE_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
     }
+
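+    // Oneway notification: parcels the event, session, source, both client
+    // and device configs, and the patch handle, mirrored by the Bn reader.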
+    void onRecordingConfigurationUpdate(int event, audio_session_t session,
+            audio_source_t source, const audio_config_base_t *clientConfig,
+            const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyServiceClient::getInterfaceDescriptor());
+        data.writeInt32(event);
+        data.writeInt32(session);
+        data.writeInt32(source);
+        writeAudioConfigBaseToParcel(data, clientConfig);
+        writeAudioConfigBaseToParcel(data, deviceConfig);
+        data.writeInt32(patchHandle);
+        remote()->transact(RECORDING_CONFIGURATION_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyServiceClient, "android.media.IAudioPolicyServiceClient");
@@ -89,7 +119,21 @@
             int32_t state = data.readInt32();
             onDynamicPolicyMixStateUpdate(regId, state);
             return NO_ERROR;
-    }
+        } break;
+    case RECORDING_CONFIGURATION_UPDATE: {
+            CHECK_INTERFACE(IAudioPolicyServiceClient, data, reply);
+            int event = (int) data.readInt32();
+            audio_session_t session = (audio_session_t) data.readInt32();
+            audio_source_t source = (audio_source_t) data.readInt32();
+            audio_config_base_t clientConfig;
+            audio_config_base_t deviceConfig;
+            readAudioConfigBaseFromParcel(data, &clientConfig);
+            readAudioConfigBaseFromParcel(data, &deviceConfig);
+            audio_patch_handle_t patchHandle = (audio_patch_handle_t) data.readInt32();
+            onRecordingConfigurationUpdate(event, session, source, &clientConfig, &deviceConfig,
+                    patchHandle);
+            return NO_ERROR;
+        } break;
     default:
         return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 9d80753..ae66436 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -42,7 +42,7 @@
     {
     }
 
-    virtual status_t start(int /*AudioSystem::sync_event_t*/ event, int triggerSession)
+    virtual status_t start(int /*AudioSystem::sync_event_t*/ event, audio_session_t triggerSession)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
@@ -77,7 +77,7 @@
         case START: {
             CHECK_INTERFACE(IAudioRecord, data, reply);
             int /*AudioSystem::sync_event_t*/ event = data.readInt32();
-            int triggerSession = data.readInt32();
+            audio_session_t triggerSession = (audio_session_t) data.readInt32();
             reply->writeInt32(start(event, triggerSession));
             return NO_ERROR;
         } break;
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index 651cb61..636e3bb 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -36,9 +36,6 @@
     RESERVED, // was MUTE
     PAUSE,
     ATTACH_AUX_EFFECT,
-    ALLOCATE_TIMED_BUFFER,
-    QUEUE_TIMED_BUFFER,
-    SET_MEDIA_TIME_TRANSFORM,
     SET_PARAMETERS,
     GET_TIMESTAMP,
     SIGNAL,
@@ -115,55 +112,6 @@
         return status;
     }
 
-    virtual status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
-        data.writeInt64(size);
-        status_t status = remote()->transact(ALLOCATE_TIMED_BUFFER,
-                                             data, &reply);
-        if (status == NO_ERROR) {
-            status = reply.readInt32();
-            if (status == NO_ERROR) {
-                *buffer = interface_cast<IMemory>(reply.readStrongBinder());
-                if (*buffer != 0 && (*buffer)->pointer() == NULL) {
-                    (*buffer).clear();
-                }
-            }
-        }
-        return status;
-    }
-
-    virtual status_t queueTimedBuffer(const sp<IMemory>& buffer,
-                                      int64_t pts) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(buffer));
-        data.writeInt64(pts);
-        status_t status = remote()->transact(QUEUE_TIMED_BUFFER,
-                                             data, &reply);
-        if (status == NO_ERROR) {
-            status = reply.readInt32();
-        }
-        return status;
-    }
-
-    virtual status_t setMediaTimeTransform(const LinearTransform& xform,
-                                           int target) {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
-        data.writeInt64(xform.a_zero);
-        data.writeInt64(xform.b_zero);
-        data.writeInt32(xform.a_to_b_numer);
-        data.writeInt32(xform.a_to_b_denom);
-        data.writeInt32(target);
-        status_t status = remote()->transact(SET_MEDIA_TIME_TRANSFORM,
-                                             data, &reply);
-        if (status == NO_ERROR) {
-            status = reply.readInt32();
-        }
-        return status;
-    }
-
     virtual status_t setParameters(const String8& keyValuePairs) {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
@@ -235,35 +183,6 @@
             reply->writeInt32(attachAuxEffect(data.readInt32()));
             return NO_ERROR;
         } break;
-        case ALLOCATE_TIMED_BUFFER: {
-            CHECK_INTERFACE(IAudioTrack, data, reply);
-            sp<IMemory> buffer;
-            status_t status = allocateTimedBuffer(data.readInt64(), &buffer);
-            reply->writeInt32(status);
-            if (status == NO_ERROR) {
-                reply->writeStrongBinder(IInterface::asBinder(buffer));
-            }
-            return NO_ERROR;
-        } break;
-        case QUEUE_TIMED_BUFFER: {
-            CHECK_INTERFACE(IAudioTrack, data, reply);
-            sp<IMemory> buffer = interface_cast<IMemory>(
-                data.readStrongBinder());
-            uint64_t pts = data.readInt64();
-            reply->writeInt32(queueTimedBuffer(buffer, pts));
-            return NO_ERROR;
-        } break;
-        case SET_MEDIA_TIME_TRANSFORM: {
-            CHECK_INTERFACE(IAudioTrack, data, reply);
-            LinearTransform xform;
-            xform.a_zero = data.readInt64();
-            xform.b_zero = data.readInt64();
-            xform.a_to_b_numer = data.readInt32();
-            xform.a_to_b_denom = data.readInt32();
-            int target = data.readInt32();
-            reply->writeInt32(setMediaTimeTransform(xform, target));
-            return NO_ERROR;
-        } break;
         case SET_PARAMETERS: {
             CHECK_INTERFACE(IAudioTrack, data, reply);
             String8 keyValuePairs(data.readString8());
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index 22f8af7..26dd2c9 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -95,18 +95,20 @@
     }
 
     virtual ssize_t decrypt(
-            bool secure,
+            DestinationType dstType,
             const uint8_t key[16],
             const uint8_t iv[16],
-            CryptoPlugin::Mode mode,
+            CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
             const sp<IMemory> &sharedBuffer, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
             void *dstPtr,
             AString *errorDetailMsg) {
         Parcel data, reply;
         data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
-        data.writeInt32(secure);
+        data.writeInt32((int32_t)dstType);
         data.writeInt32(mode);
+        data.writeInt32(pattern.mEncryptBlocks);
+        data.writeInt32(pattern.mSkipBlocks);
 
         static const uint8_t kDummy[16] = { 0 };
 
@@ -134,8 +136,12 @@
         data.writeInt32(numSubSamples);
         data.write(subSamples, sizeof(CryptoPlugin::SubSample) * numSubSamples);
 
-        if (secure) {
+        if (dstType == kDestinationTypeNativeHandle) {
+            data.writeNativeHandle(static_cast<native_handle_t *>(dstPtr));
+        } else if (dstType == kDestinationTypeOpaqueHandle) {
             data.writeInt64(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(dstPtr)));
+        } else {
+            dstType = kDestinationTypeVmPointer;
         }
 
         remote()->transact(DECRYPT, data, &reply);
@@ -146,7 +152,7 @@
             errorDetailMsg->setTo(reply.readCString());
         }
 
-        if (!secure && result >= 0) {
+        if (dstType == kDestinationTypeVmPointer && result >= 0) {
             reply.read(dstPtr, result);
         }
 
@@ -261,7 +267,11 @@
             CHECK_INTERFACE(ICrypto, data, reply);
 
             const char *mime = data.readCString();
-            reply->writeInt32(requiresSecureDecoderComponent(mime));
+            if (mime == NULL) {
+                reply->writeInt32(BAD_VALUE);
+            } else {
+                reply->writeInt32(requiresSecureDecoderComponent(mime));
+            }
 
             return OK;
         }
@@ -270,8 +280,11 @@
         {
             CHECK_INTERFACE(ICrypto, data, reply);
 
-            bool secure = data.readInt32() != 0;
+            DestinationType dstType = (DestinationType)data.readInt32();
             CryptoPlugin::Mode mode = (CryptoPlugin::Mode)data.readInt32();
+            CryptoPlugin::Pattern pattern;
+            pattern.mEncryptBlocks = data.readInt32();
+            pattern.mSkipBlocks = data.readInt32();
 
             uint8_t key[16];
             data.read(key, sizeof(key));
@@ -282,6 +295,10 @@
             size_t totalSize = data.readInt32();
             sp<IMemory> sharedBuffer =
                 interface_cast<IMemory>(data.readStrongBinder());
+            if (sharedBuffer == NULL) {
+                reply->writeInt32(BAD_VALUE);
+                return OK;
+            }
             int32_t offset = data.readInt32();
 
             int32_t numSubSamples = data.readInt32();
@@ -293,11 +310,17 @@
                     subSamples,
                     sizeof(CryptoPlugin::SubSample) * numSubSamples);
 
-            void *secureBufferId, *dstPtr;
-            if (secure) {
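+            // Three destination types: a native handle (secure buffer passed
+            // as a handle), an opaque secure-buffer id, or a VM pointer that
+            // is allocated here and written back into the reply on success.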
+            native_handle_t *nativeHandle = NULL;
+            void *secureBufferId = NULL, *dstPtr;
+            if (dstType == kDestinationTypeNativeHandle) {
+                nativeHandle = data.readNativeHandle();
+                dstPtr = static_cast<void *>(nativeHandle);
+            } else if (dstType == kDestinationTypeOpaqueHandle) {
                 secureBufferId = reinterpret_cast<void *>(static_cast<uintptr_t>(data.readInt64()));
+                dstPtr = secureBufferId;
             } else {
-                dstPtr = calloc(1, totalSize);
+                dstType = kDestinationTypeVmPointer;
+                dstPtr = malloc(totalSize);
             }
 
             AString errorDetailMsg;
@@ -327,13 +350,13 @@
                 result = -EINVAL;
             } else {
                 result = decrypt(
-                    secure,
+                    dstType,
                     key,
                     iv,
-                    mode,
+                    mode, pattern,
                     sharedBuffer, offset,
                     subSamples, numSubSamples,
-                    secure ? secureBufferId : dstPtr,
+                    dstPtr,
                     &errorDetailMsg);
             }
 
@@ -343,13 +366,21 @@
                 reply->writeCString(errorDetailMsg.c_str());
             }
 
-            if (!secure) {
+            if (dstType == kDestinationTypeVmPointer) {
                 if (result >= 0) {
                     CHECK_LE(result, static_cast<ssize_t>(totalSize));
                     reply->write(dstPtr, result);
                 }
                 free(dstPtr);
                 dstPtr = NULL;
+            } else if (dstType == kDestinationTypeNativeHandle) {
+                int err;
+                if ((err = native_handle_close(nativeHandle)) < 0) {
+                    ALOGW("secure buffer native_handle_close failed: %d", err);
+                }
+                if ((err = native_handle_delete(nativeHandle)) < 0) {
+                    ALOGW("secure buffer native_handle_delete failed: %d", err);
+                }
             }
 
             delete[] subSamples;
diff --git a/media/libmedia/IDataSource.cpp b/media/libmedia/IDataSource.cpp
index 76d1d68..51c9938 100644
--- a/media/libmedia/IDataSource.cpp
+++ b/media/libmedia/IDataSource.cpp
@@ -23,6 +23,7 @@
 
 #include <binder/IMemory.h>
 #include <binder/Parcel.h>
+#include <drm/drm_framework_common.h>
 #include <media/stagefright/foundation/ADebug.h>
 
 namespace android {
@@ -32,6 +33,9 @@
     READ_AT,
     GET_SIZE,
     CLOSE,
+    GET_FLAGS,
+    TO_STRING,
+    DRM_INITIALIZATION,
 };
 
 struct BpDataSource : public BpInterface<IDataSource> {
@@ -68,6 +72,61 @@
         data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
         remote()->transact(CLOSE, data, &reply);
     }
+
+    virtual uint32_t getFlags() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
+        remote()->transact(GET_FLAGS, data, &reply);
+        return reply.readUint32();
+    }
+
+    virtual String8 toString() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
+        remote()->transact(TO_STRING, data, &reply);
+        return reply.readString8();
+    }
+
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
+        if (mime == NULL) {
+            data.writeInt32(0);
+        } else {
+            data.writeInt32(1);
+            data.writeCString(mime);
+        }
+        remote()->transact(DRM_INITIALIZATION, data, &reply);
+        sp<DecryptHandle> handle;
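+        // The reply layout must mirror the DRM_INITIALIZATION writer in
+        // BnDataSource::onTransact: decryptId, mimeType, decryptApiType,
+        // status, decryptInfo length (-1 if absent), then the copy-control
+        // and extended-data tables.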
+        if (reply.dataAvail() != 0) {
+            handle = new DecryptHandle();
+            handle->decryptId = reply.readInt32();
+            handle->mimeType = reply.readString8();
+            handle->decryptApiType = reply.readInt32();
+            handle->status = reply.readInt32();
+
+            const int bufferLength = reply.readInt32();
+            if (bufferLength != -1) {
+                handle->decryptInfo = new DecryptInfo();
+                handle->decryptInfo->decryptBufferLength = bufferLength;
+            }
+
+            size_t size = reply.readInt32();
+            for (size_t i = 0; i < size; ++i) {
+                DrmCopyControl key = (DrmCopyControl)reply.readInt32();
+                int value = reply.readInt32();
+                handle->copyControlVector.add(key, value);
+            }
+
+            size = reply.readInt32();
+            for (size_t i = 0; i < size; ++i) {
+                String8 key = reply.readString8();
+                String8 value = reply.readString8();
+                handle->extendedData.add(key, value);
+            }
+        }
+        return handle;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(DataSource, "android.media.IDataSource");
@@ -100,6 +159,53 @@
             close();
             return NO_ERROR;
         } break;
+        case GET_FLAGS: {
+            CHECK_INTERFACE(IDataSource, data, reply);
+            reply->writeUint32(getFlags());
+            return NO_ERROR;
+        } break;
+        case TO_STRING: {
+            CHECK_INTERFACE(IDataSource, data, reply);
+            reply->writeString8(toString());
+            return NO_ERROR;
+        } break;
+        case DRM_INITIALIZATION: {
+            CHECK_INTERFACE(IDataSource, data, reply);
+            const char *mime = NULL;
+            const int32_t flag = data.readInt32();
+            if (flag != 0) {
+                mime = data.readCString();
+            }
+            sp<DecryptHandle> handle = DrmInitialization(mime);
+            if (handle != NULL) {
+                reply->writeInt32(handle->decryptId);
+                reply->writeString8(handle->mimeType);
+                reply->writeInt32(handle->decryptApiType);
+                reply->writeInt32(handle->status);
+
+                if (handle->decryptInfo != NULL) {
+                    reply->writeInt32(handle->decryptInfo->decryptBufferLength);
+                } else {
+                    reply->writeInt32(-1);
+                }
+
+                size_t size = handle->copyControlVector.size();
+                reply->writeInt32(size);
+                for (size_t i = 0; i < size; ++i) {
+                    reply->writeInt32(handle->copyControlVector.keyAt(i));
+                    reply->writeInt32(handle->copyControlVector.valueAt(i));
+                }
+
+                size = handle->extendedData.size();
+                reply->writeInt32(size);
+                for (size_t i = 0; i < size; ++i) {
+                    reply->writeString8(handle->extendedData.keyAt(i));
+                    reply->writeString8(handle->extendedData.valueAt(i));
+                }
+            }
+            return NO_ERROR;
+        } break;
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/IDrm.cpp b/media/libmedia/IDrm.cpp
index 7c709cd..7f131f4 100644
--- a/media/libmedia/IDrm.cpp
+++ b/media/libmedia/IDrm.cpp
@@ -54,7 +54,6 @@
     SIGN_RSA,
     VERIFY,
     SET_LISTENER,
-    UNPROVISION_DEVICE,
     GET_SECURE_STOP,
     RELEASE_ALL_SECURE_STOPS
 };
@@ -277,18 +276,6 @@
         return reply.readInt32();
     }
 
-    virtual status_t unprovisionDevice() {
-        Parcel data, reply;
-        data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
-
-        status_t status = remote()->transact(UNPROVISION_DEVICE, data, &reply);
-        if (status != OK) {
-            return status;
-        }
-
-        return reply.readInt32();
-    }
-
     virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
@@ -749,14 +736,6 @@
             return OK;
         }
 
-        case UNPROVISION_DEVICE:
-        {
-            CHECK_INTERFACE(IDrm, data, reply);
-            status_t result = unprovisionDevice();
-            reply->writeInt32(result);
-            return OK;
-        }
-
         case GET_SECURE_STOPS:
         {
             CHECK_INTERFACE(IDrm, data, reply);
diff --git a/media/libmedia/IMediaCodecList.cpp b/media/libmedia/IMediaCodecList.cpp
index e2df104..737f50c 100644
--- a/media/libmedia/IMediaCodecList.cpp
+++ b/media/libmedia/IMediaCodecList.cpp
@@ -157,6 +157,10 @@
         {
             CHECK_INTERFACE(IMediaCodecList, data, reply);
             const char *type = data.readCString();
+            if (type == NULL) {
+                reply->writeInt32(NAME_NOT_FOUND);
+                return NO_ERROR;
+            }
             bool isEncoder = static_cast<bool>(data.readInt32());
             size_t startIndex = static_cast<size_t>(data.readInt32());
             ssize_t index = findCodecByType(type, isEncoder, startIndex);
@@ -172,6 +176,10 @@
         {
             CHECK_INTERFACE(IMediaCodecList, data, reply);
             const char *name = data.readCString();
+            if (name == NULL) {
+                reply->writeInt32(NAME_NOT_FOUND);
+                return NO_ERROR;
+            }
             ssize_t index = findCodecByName(name);
             if (index > INT32_MAX || index < 0) {
                 index = NAME_NOT_FOUND;
diff --git a/media/libmedia/IMediaCodecService.cpp b/media/libmedia/IMediaCodecService.cpp
new file mode 100644
index 0000000..dcf2b27
--- /dev/null
+++ b/media/libmedia/IMediaCodecService.cpp
@@ -0,0 +1,72 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "IMediaCodecService"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <binder/Parcel.h>
+#include <media/IMediaCodecService.h>
+
+namespace android {
+
+enum {
+    GET_OMX = IBinder::FIRST_CALL_TRANSACTION
+};
+
+class BpMediaCodecService : public BpInterface<IMediaCodecService>
+{
+public:
+    BpMediaCodecService(const sp<IBinder>& impl)
+        : BpInterface<IMediaCodecService>(impl)
+    {
+    }
+
+    virtual sp<IOMX> getOMX() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaCodecService::getInterfaceDescriptor());
+        remote()->transact(GET_OMX, data, &reply);
+        return interface_cast<IOMX>(reply.readStrongBinder());
+    }
+
+};
+
+IMPLEMENT_META_INTERFACE(MediaCodecService, "android.media.IMediaCodecService");
+
+// ----------------------------------------------------------------------
+
+status_t BnMediaCodecService::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch (code) {
+
+        case GET_OMX: {
+            CHECK_INTERFACE(IMediaCodecService, data, reply);
+            sp<IOMX> omx = getOMX();
+            reply->writeStrongBinder(IInterface::asBinder(omx));
+            return NO_ERROR;
+        }
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+} // namespace android
diff --git a/media/libmedia/IMediaDrmService.cpp b/media/libmedia/IMediaDrmService.cpp
new file mode 100644
index 0000000..9b6ecfd
--- /dev/null
+++ b/media/libmedia/IMediaDrmService.cpp
@@ -0,0 +1,88 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+#include <binder/IMemory.h>
+#include <media/ICrypto.h>
+#include <media/IDrm.h>
+#include <media/IMediaDrmService.h>
+
+#include <utils/Errors.h>  // for status_t
+#include <utils/String8.h>
+
+namespace android {
+
+enum {
+    MAKE_CRYPTO = IBinder::FIRST_CALL_TRANSACTION,
+    MAKE_DRM,
+};
+
+class BpMediaDrmService: public BpInterface<IMediaDrmService>
+{
+public:
+    BpMediaDrmService(const sp<IBinder>& impl)
+        : BpInterface<IMediaDrmService>(impl)
+    {
+    }
+
+    virtual sp<ICrypto> makeCrypto() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaDrmService::getInterfaceDescriptor());
+        remote()->transact(MAKE_CRYPTO, data, &reply);
+        return interface_cast<ICrypto>(reply.readStrongBinder());
+    }
+
+    virtual sp<IDrm> makeDrm() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaDrmService::getInterfaceDescriptor());
+        remote()->transact(MAKE_DRM, data, &reply);
+        return interface_cast<IDrm>(reply.readStrongBinder());
+    }
+
+};
+
+IMPLEMENT_META_INTERFACE(MediaDrmService, "android.media.IMediaDrmService");
+
+// ----------------------------------------------------------------------
+
+status_t BnMediaDrmService::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch (code) {
+        case MAKE_CRYPTO: {
+            CHECK_INTERFACE(IMediaDrmService, data, reply);
+            sp<ICrypto> crypto = makeCrypto();
+            reply->writeStrongBinder(IInterface::asBinder(crypto));
+            return NO_ERROR;
+        } break;
+        case MAKE_DRM: {
+            CHECK_INTERFACE(IMediaDrmService, data, reply);
+            sp<IDrm> drm = makeDrm();
+            reply->writeStrongBinder(IInterface::asBinder(drm));
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+} // namespace android
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
new file mode 100644
index 0000000..e8ad75b
--- /dev/null
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "BpMediaExtractor"
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <binder/IPCThreadState.h>
+#include <binder/Parcel.h>
+#include <media/IMediaExtractor.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+enum {
+    COUNTTRACKS = IBinder::FIRST_CALL_TRANSACTION,
+    GETTRACK,
+    GETTRACKMETADATA,
+    GETMETADATA,
+    FLAGS,
+    SETDRMFLAG,
+    GETDRMFLAG,
+    GETDRMTRACKINFO,
+    SETUID,
+    NAME
+};
+
+class BpMediaExtractor : public BpInterface<IMediaExtractor> {
+public:
+    BpMediaExtractor(const sp<IBinder>& impl)
+        : BpInterface<IMediaExtractor>(impl)
+    {
+    }
+
+    virtual size_t countTracks() {
+        ALOGV("countTracks");
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
+        status_t ret = remote()->transact(COUNTTRACKS, data, &reply);
+        size_t numTracks = 0;
+        if (ret == NO_ERROR) {
+            numTracks = reply.readUint32();
+        }
+        return numTracks;
+    }
+    virtual sp<IMediaSource> getTrack(size_t index) {
+        ALOGV("getTrack(%zu)", index);
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
+        data.writeUint32(index);
+        status_t ret = remote()->transact(GETTRACK, data, &reply);
+        if (ret == NO_ERROR) {
+            return interface_cast<IMediaSource>(reply.readStrongBinder());
+        }
+        return NULL;
+    }
+
+    virtual sp<MetaData> getTrackMetaData(
+            size_t index, uint32_t flags) {
+        ALOGV("getTrackMetaData(%zu, %u)", index, flags);
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
+        data.writeUint32(index);
+        data.writeUint32(flags);
+        status_t ret = remote()->transact(GETTRACKMETADATA, data, &reply);
+        if (ret == NO_ERROR) {
+            return MetaData::createFromParcel(reply);
+        }
+        return NULL;
+    }
+
+    virtual sp<MetaData> getMetaData() {
+        ALOGV("getMetaData");
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
+        status_t ret = remote()->transact(GETMETADATA, data, &reply);
+        if (ret == NO_ERROR) {
+            return MetaData::createFromParcel(reply);
+        }
+        return NULL;
+    }
+
+    virtual uint32_t flags() const {
+        ALOGV("flags NOT IMPLEMENTED");
+        return 0;
+    }
+
+    virtual void setDrmFlag(bool flag __unused) {
+        ALOGV("setDrmFlag NOT IMPLEMENTED");
+    }
+    virtual bool getDrmFlag() {
+        ALOGV("getDrmFlag NOT IMPLEMENTED");
+        return false;
+    }
+    virtual char* getDrmTrackInfo(size_t trackID __unused, int *len __unused) {
+        ALOGV("getDrmTrackInfo NOT IMPLEMENTED");
+        return NULL;
+    }
+    virtual void setUID(uid_t uid __unused) {
+        ALOGV("setUID NOT IMPLEMENTED");
+    }
+
+    virtual const char * name() {
+        ALOGV("name NOT IMPLEMENTED");
+        return NULL;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(MediaExtractor, "android.media.IMediaExtractor");
+
+#undef LOG_TAG
+#define LOG_TAG "BnMediaExtractor"
+
+status_t BnMediaExtractor::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch (code) {
+        case COUNTTRACKS: {
+            ALOGV("countTracks");
+            CHECK_INTERFACE(IMediaExtractor, data, reply);
+            size_t numTracks = countTracks();
+            if (numTracks > INT32_MAX) {
+                numTracks = 0;
+            }
+            reply->writeUint32(uint32_t(numTracks));
+            return NO_ERROR;
+        }
+        case GETTRACK: {
+            ALOGV("getTrack()");
+            CHECK_INTERFACE(IMediaExtractor, data, reply);
+            uint32_t idx;
+            if (data.readUint32(&idx) == NO_ERROR) {
+                const sp<IMediaSource> track = getTrack(size_t(idx));
+                registerMediaSource(this, track);
+                return reply->writeStrongBinder(IInterface::asBinder(track));
+            }
+            return UNKNOWN_ERROR;
+        }
+        case GETTRACKMETADATA: {
+            ALOGV("getTrackMetaData");
+            CHECK_INTERFACE(IMediaExtractor, data, reply);
+            uint32_t idx;
+            uint32_t flags;
+            if (data.readUint32(&idx) == NO_ERROR &&
+                    data.readUint32(&flags) == NO_ERROR) {
+                sp<MetaData> meta = getTrackMetaData(idx, flags);
+                if (meta == NULL) {
+                    return UNKNOWN_ERROR;
+                }
+                meta->writeToParcel(*reply);
+                return NO_ERROR;
+            }
+            return UNKNOWN_ERROR;
+        }
+        case GETMETADATA: {
+            ALOGV("getMetaData");
+            CHECK_INTERFACE(IMediaExtractor, data, reply);
+            sp<MetaData> meta = getMetaData();
+            if (meta != NULL) {
+                meta->writeToParcel(*reply);
+                return NO_ERROR;
+            }
+            return UNKNOWN_ERROR;
+        }
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+typedef struct {
+    String8 mime;
+    String8 name;
+    String8 sourceDescription;
+    pid_t owner;
+    wp<IMediaExtractor> extractor;
+    Vector<wp<IMediaSource>> tracks;
+    Vector<String8> trackDescriptions;
+    String8 toString() const;
+} ExtractorInstance;
+
+String8 ExtractorInstance::toString() const {
+    String8 str = name;
+    str.append(" for mime ");
+    str.append(mime);
+    str.append(", source ");
+    str.append(sourceDescription);
+    str.append(String8::format(", pid %d: ", owner));
+    if (extractor.promote() == NULL) {
+        str.append("deleted\n");
+    } else {
+        str.append("active\n");
+    }
+    for (size_t i = 0; i < tracks.size(); i++) {
+        const String8 desc = trackDescriptions.itemAt(i);
+        str.appendFormat("    track {%s} ", desc.string());
+        const sp<IMediaSource> source = tracks.itemAt(i).promote();
+        if (source == NULL) {
+            str.append(": deleted\n");
+        } else {
+            str.appendFormat(": active\n");
+        }
+    }
+    return str;
+}
+
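+// Debugging state: a bounded, most-recent-first list of extractor instances
+// and their tracks, printed by dumpExtractors().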
+static Vector<ExtractorInstance> sExtractors;
+static Mutex sExtractorsLock;
+
+void registerMediaSource(
+        const sp<IMediaExtractor> &ex,
+        const sp<IMediaSource> &source) {
+    Mutex::Autolock lock(sExtractorsLock);
+    for (size_t i = 0; i < sExtractors.size(); i++) {
+        ExtractorInstance &instance = sExtractors.editItemAt(i);
+        sp<IMediaExtractor> extractor = instance.extractor.promote();
+        if (extractor != NULL && extractor == ex) {
+            if (instance.tracks.size() > 5) {
+                instance.tracks.resize(5);
+            }
+            instance.tracks.push_front(source);
+            instance.trackDescriptions.add(source->getFormat()->toString());
+            break;
+        }
+    }
+}
+
+void registerMediaExtractor(
+        const sp<IMediaExtractor> &extractor,
+        const sp<DataSource> &source,
+        const char *mime) {
+    ExtractorInstance ex;
+    ex.mime = mime == NULL ? "NULL" : mime;
+    ex.name = extractor->name();
+    ex.sourceDescription = source->toString();
+    ex.owner = IPCThreadState::self()->getCallingPid();
+    ex.extractor = extractor;
+
+    {
+        Mutex::Autolock lock(sExtractorsLock);
+        if (sExtractors.size() > 10) {
+            sExtractors.resize(10);
+        }
+        sExtractors.push_front(ex);
+    }
+}
+
+status_t dumpExtractors(int fd, const Vector<String16>&) {
+    String8 out;
+    out.append("Recent extractors, most recent first:\n");
+    {
+        Mutex::Autolock lock(sExtractorsLock);
+        for (size_t i = 0; i < sExtractors.size(); i++) {
+            const ExtractorInstance &instance = sExtractors.itemAt(i);
+            out.append("  ");
+            out.append(instance.toString());
+        }
+    }
+    write(fd, out.string(), out.size());
+    return OK;
+}
+
+
+}  // namespace android
+
diff --git a/media/libmedia/IMediaExtractorService.cpp b/media/libmedia/IMediaExtractorService.cpp
new file mode 100644
index 0000000..dcbbde2
--- /dev/null
+++ b/media/libmedia/IMediaExtractorService.cpp
@@ -0,0 +1,87 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "IMediaExtractorService"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <binder/Parcel.h>
+#include <media/IMediaExtractorService.h>
+
+namespace android {
+
+enum {
+    MAKE_EXTRACTOR = IBinder::FIRST_CALL_TRANSACTION
+};
+
+class BpMediaExtractorService : public BpInterface<IMediaExtractorService>
+{
+public:
+    BpMediaExtractorService(const sp<IBinder>& impl)
+        : BpInterface<IMediaExtractorService>(impl)
+    {
+    }
+
+    virtual sp<IMediaExtractor> makeExtractor(const sp<IDataSource> &source, const char *mime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaExtractorService::getInterfaceDescriptor());
+        data.writeStrongBinder(IInterface::asBinder(source));
+        if (mime != NULL) {
+            data.writeCString(mime);
+        }
+        status_t ret = remote()->transact(MAKE_EXTRACTOR, data, &reply);
+        if (ret == NO_ERROR) {
+            return interface_cast<IMediaExtractor>(reply.readStrongBinder());
+        }
+        return NULL;
+    }
+
+};
+
+IMPLEMENT_META_INTERFACE(MediaExtractorService, "android.media.IMediaExtractorService");
+
+// ----------------------------------------------------------------------
+
+status_t BnMediaExtractorService::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch (code) {
+
+        case MAKE_EXTRACTOR: {
+            CHECK_INTERFACE(IMediaExtractorService, data, reply);
+            sp<IBinder> b;
+            status_t ret = data.readStrongBinder(&b);
+            if (ret != NO_ERROR || b == NULL) {
+                ALOGE("Error reading source from parcel");
+                return ret;
+            }
+            sp<IDataSource> source = interface_cast<IDataSource>(b);
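+            // mime is optional: the proxy only writes it when non-NULL, so
+            // readCString() may return NULL here.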
+            const char *mime = data.readCString();
+            sp<IMediaExtractor> ex = makeExtractor(source, mime);
+            reply->writeStrongBinder(IInterface::asBinder(ex));
+            return NO_ERROR;
+        }
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+} // namespace android
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 9765f0d..0bee8d3 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -224,6 +224,11 @@
 
             const char* srcUrl = data.readCString();
 
+            if (httpService == NULL || srcUrl == NULL) {
+                reply->writeInt32(BAD_VALUE);
+                return NO_ERROR;
+            }
+
             KeyedVector<String8, String8> headers;
             size_t numHeaders = (size_t) data.readInt64();
             for (size_t i = 0; i < numHeaders; ++i) {
@@ -240,7 +245,7 @@
         } break;
         case SET_DATA_SOURCE_FD: {
             CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
-            int fd = dup(data.readFileDescriptor());
+            int fd = data.readFileDescriptor();
             int64_t offset = data.readInt64();
             int64_t length = data.readInt64();
             reply->writeInt32(setDataSource(fd, offset, length));
@@ -250,7 +255,11 @@
             CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
             sp<IDataSource> source =
                 interface_cast<IDataSource>(data.readStrongBinder());
-            reply->writeInt32(setDataSource(source));
+            if (source == NULL) {
+                reply->writeInt32(BAD_VALUE);
+            } else {
+                reply->writeInt32(setDataSource(source));
+            }
             return NO_ERROR;
         } break;
         case GET_FRAME_AT_TIME: {
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 942aec3..519a1fd 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -444,6 +444,10 @@
             }
 
             const char* url = data.readCString();
+            if (url == NULL) {
+                reply->writeInt32(BAD_VALUE);
+                return NO_ERROR;
+            }
             KeyedVector<String8, String8> headers;
             int32_t numHeaders = data.readInt32();
             for (int i = 0; i < numHeaders; ++i) {
@@ -467,14 +471,22 @@
             CHECK_INTERFACE(IMediaPlayer, data, reply);
             sp<IStreamSource> source =
                 interface_cast<IStreamSource>(data.readStrongBinder());
-            reply->writeInt32(setDataSource(source));
+            if (source == NULL) {
+                reply->writeInt32(BAD_VALUE);
+            } else {
+                reply->writeInt32(setDataSource(source));
+            }
             return NO_ERROR;
         }
         case SET_DATA_SOURCE_CALLBACK: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
             sp<IDataSource> source =
                 interface_cast<IDataSource>(data.readStrongBinder());
-            reply->writeInt32(setDataSource(source));
+            if (source == NULL) {
+                reply->writeInt32(BAD_VALUE);
+            } else {
+                reply->writeInt32(setDataSource(source));
+            }
             return NO_ERROR;
         }
         case SET_VIDEO_SURFACETEXTURE: {
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index 05f8670..7590c1b 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -20,8 +20,6 @@
 
 #include <binder/Parcel.h>
 #include <binder/IMemory.h>
-#include <media/ICrypto.h>
-#include <media/IDrm.h>
 #include <media/IHDCP.h>
 #include <media/IMediaCodecList.h>
 #include <media/IMediaHTTPService.h>
@@ -42,8 +40,6 @@
     CREATE_MEDIA_RECORDER,
     CREATE_METADATA_RETRIEVER,
     GET_OMX,
-    MAKE_CRYPTO,
-    MAKE_DRM,
     MAKE_HDCP,
     ADD_BATTERY_DATA,
     PULL_BATTERY_DATA,
@@ -68,7 +64,7 @@
     }
 
     virtual sp<IMediaPlayer> create(
-            const sp<IMediaPlayerClient>& client, int audioSessionId) {
+            const sp<IMediaPlayerClient>& client, audio_session_t audioSessionId) {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
         data.writeStrongBinder(IInterface::asBinder(client));
@@ -94,20 +90,6 @@
         return interface_cast<IOMX>(reply.readStrongBinder());
     }
 
-    virtual sp<ICrypto> makeCrypto() {
-        Parcel data, reply;
-        data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
-        remote()->transact(MAKE_CRYPTO, data, &reply);
-        return interface_cast<ICrypto>(reply.readStrongBinder());
-    }
-
-    virtual sp<IDrm> makeDrm() {
-        Parcel data, reply;
-        data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
-        remote()->transact(MAKE_DRM, data, &reply);
-        return interface_cast<IDrm>(reply.readStrongBinder());
-    }
-
     virtual sp<IHDCP> makeHDCP(bool createEncryptionModule) {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
@@ -161,7 +143,7 @@
             CHECK_INTERFACE(IMediaPlayerService, data, reply);
             sp<IMediaPlayerClient> client =
                 interface_cast<IMediaPlayerClient>(data.readStrongBinder());
-            int audioSessionId = data.readInt32();
+            audio_session_t audioSessionId = (audio_session_t) data.readInt32();
             sp<IMediaPlayer> player = create(client, audioSessionId);
             reply->writeStrongBinder(IInterface::asBinder(player));
             return NO_ERROR;
@@ -185,18 +167,6 @@
             reply->writeStrongBinder(IInterface::asBinder(omx));
             return NO_ERROR;
         } break;
-        case MAKE_CRYPTO: {
-            CHECK_INTERFACE(IMediaPlayerService, data, reply);
-            sp<ICrypto> crypto = makeCrypto();
-            reply->writeStrongBinder(IInterface::asBinder(crypto));
-            return NO_ERROR;
-        } break;
-        case MAKE_DRM: {
-            CHECK_INTERFACE(IMediaPlayerService, data, reply);
-            sp<IDrm> drm = makeDrm();
-            reply->writeStrongBinder(IInterface::asBinder(drm));
-            return NO_ERROR;
-        } break;
         case MAKE_HDCP: {
             CHECK_INTERFACE(IMediaPlayerService, data, reply);
             bool createEncryptionModule = data.readInt32();
@@ -220,6 +190,10 @@
             const String16 opPackageName = data.readString16();
             sp<IRemoteDisplayClient> client(
                     interface_cast<IRemoteDisplayClient>(data.readStrongBinder()));
+            if (client == NULL) {
+                reply->writeStrongBinder(NULL);
+                return NO_ERROR;
+            }
             String8 iface(data.readString8());
             sp<IRemoteDisplay> display(listenForRemoteDisplay(opPackageName, client, iface));
             reply->writeStrongBinder(IInterface::asBinder(display));
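
Removing MAKE_CRYPTO and MAKE_DRM from the middle of the transaction enum silently renumbers every code that follows, because the values count up from IBinder::FIRST_CALL_TRANSACTION. That is only safe when proxy and stub are rebuilt together, as they are for this in-tree interface. Assuming the list begins with a CREATE entry in the usual way, the effect looks like:

    enum {
        CREATE = IBinder::FIRST_CALL_TRANSACTION,  // assumed first entry
        CREATE_MEDIA_RECORDER,
        CREATE_METADATA_RETRIEVER,
        GET_OMX,
        MAKE_HDCP,         // now FIRST_CALL_TRANSACTION + 4, previously + 6
        ADD_BATTERY_DATA,  // every later code also shifts down by two
        PULL_BATTERY_DATA,
    };
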
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index ee3b584..cded55c 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -23,7 +23,8 @@
 
 #include <utils/Log.h>
 #include <binder/Parcel.h>
-#include <camera/ICamera.h>
+#include <camera/android/hardware/ICamera.h>
+#include <camera/ICameraRecordingProxy.h>
 #include <media/IMediaRecorderClient.h>
 #include <media/IMediaRecorder.h>
 #include <gui/Surface.h>
@@ -54,7 +55,9 @@
     SET_PREVIEW_SURFACE,
     SET_CAMERA,
     SET_LISTENER,
-    SET_CLIENT_NAME
+    SET_CLIENT_NAME,
+    PAUSE,
+    RESUME
 };
 
 class BpMediaRecorder: public BpInterface<IMediaRecorder>
@@ -65,7 +68,7 @@
     {
     }
 
-    status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy)
+    status_t setCamera(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy)
     {
         ALOGV("setCamera(%p,%p)", camera.get(), proxy.get());
         Parcel data, reply;
@@ -276,6 +279,24 @@
         return reply.readInt32();
     }
 
+    status_t pause()
+    {
+        ALOGV("pause");
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+        remote()->transact(PAUSE, data, &reply);
+        return reply.readInt32();
+    }
+
+    status_t resume()
+    {
+        ALOGV("resume");
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+        remote()->transact(RESUME, data, &reply);
+        return reply.readInt32();
+    }
+
     status_t close()
     {
         ALOGV("close");
@@ -340,6 +361,18 @@
             reply->writeInt32(start());
             return NO_ERROR;
         } break;
+        case PAUSE: {
+            ALOGV("PAUSE");
+            CHECK_INTERFACE(IMediaRecorder, data, reply);
+            reply->writeInt32(pause());
+            return NO_ERROR;
+        } break;
+        case RESUME: {
+            ALOGV("RESUME");
+            CHECK_INTERFACE(IMediaRecorder, data, reply);
+            reply->writeInt32(resume());
+            return NO_ERROR;
+        } break;
         case PREPARE: {
             ALOGV("PREPARE");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
@@ -447,9 +480,10 @@
         case SET_CAMERA: {
             ALOGV("SET_CAMERA");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
-            sp<ICamera> camera = interface_cast<ICamera>(data.readStrongBinder());
+            sp<hardware::ICamera> camera =
+                    interface_cast<hardware::ICamera>(data.readStrongBinder());
             sp<ICameraRecordingProxy> proxy =
-                interface_cast<ICameraRecordingProxy>(data.readStrongBinder());
+                    interface_cast<ICameraRecordingProxy>(data.readStrongBinder());
             reply->writeInt32(setCamera(camera, proxy));
             return NO_ERROR;
         } break;
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
new file mode 100644
index 0000000..7e40e4f
--- /dev/null
+++ b/media/libmedia/IMediaSource.cpp
@@ -0,0 +1,465 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "BpMediaSource"
+#include <utils/Log.h>
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+#include <media/IMediaSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+enum {
+    START = IBinder::FIRST_CALL_TRANSACTION,
+    STOP,
+    PAUSE,
+    GETFORMAT,
+    READ,
+    READMULTIPLE,
+    RELEASE_BUFFER
+};
+
+enum {
+    NULL_BUFFER,
+    SHARED_BUFFER,
+    INLINE_BUFFER
+};
+
+class RemoteMediaBufferReleaser : public BBinder {
+public:
+    RemoteMediaBufferReleaser(MediaBuffer *buf, sp<BnMediaSource> owner) {
+        mBuf = buf;
+        mOwner = owner;
+    }
+    ~RemoteMediaBufferReleaser() {
+        if (mBuf) {
+            ALOGW("RemoteMediaBufferReleaser dtor called while still holding buffer");
+            mBuf->release();
+        }
+    }
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0) {
+        if (code == RELEASE_BUFFER) {
+            mBuf->release();
+            mBuf = NULL;
+            return OK;
+        } else {
+            return BBinder::onTransact(code, data, reply, flags);
+        }
+    }
+private:
+    MediaBuffer *mBuf;
+    // Keep a ref to ensure MediaBuffer is released before the owner, i.e., BnMediaSource,
+    // because BnMediaSource needs to delete MediaBufferGroup in its dtor and
+    // MediaBufferGroup's dtor requires all MediaBuffers to have a ref count of 0.
+    sp<BnMediaSource> mOwner;
+};
+
+
+class RemoteMediaBufferWrapper : public MediaBuffer {
+public:
+    RemoteMediaBufferWrapper(sp<IMemory> mem, sp<IBinder> source);
+protected:
+    virtual ~RemoteMediaBufferWrapper();
+private:
+    sp<IMemory> mMemory;
+    sp<IBinder> mRemoteSource;
+};
+
+RemoteMediaBufferWrapper::RemoteMediaBufferWrapper(sp<IMemory> mem, sp<IBinder> source)
+: MediaBuffer(mem->pointer(), mem->size()) {
+    mMemory = mem;
+    mRemoteSource = source;
+}
+
+RemoteMediaBufferWrapper::~RemoteMediaBufferWrapper() {
+    mMemory.clear();
+    // Explicitly ask the remote side to release the buffer. We could also just clear
+    // mRemoteSource, but that doesn't immediately release the reference on the remote side.
+    Parcel data, reply;
+    mRemoteSource->transact(RELEASE_BUFFER, data, &reply);
+    mRemoteSource.clear();
+}
+
+class BpMediaSource : public BpInterface<IMediaSource> {
+public:
+    BpMediaSource(const sp<IBinder>& impl)
+        : BpInterface<IMediaSource>(impl)
+    {
+    }
+
+    virtual status_t start(MetaData *params) {
+        ALOGV("start");
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
+        if (params) {
+            params->writeToParcel(data);
+        }
+        status_t ret = remote()->transact(START, data, &reply);
+        if (ret == NO_ERROR && params) {
+            ALOGW("ignoring potentially modified MetaData from start");
+            ALOGW("input:");
+            params->dumpToLog();
+            sp<MetaData> meta = MetaData::createFromParcel(reply);
+            ALOGW("output:");
+            meta->dumpToLog();
+        }
+        return ret;
+    }
+
+    virtual status_t stop() {
+        ALOGV("stop");
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
+        return remote()->transact(STOP, data, &reply);
+    }
+
+    virtual sp<MetaData> getFormat() {
+        ALOGV("getFormat");
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
+        status_t ret = remote()->transact(GETFORMAT, data, &reply);
+        if (ret == NO_ERROR) {
+            mMetaData = MetaData::createFromParcel(reply);
+            return mMetaData;
+        }
+        return NULL;
+    }
+
+    virtual status_t read(MediaBuffer **buffer, const ReadOptions *options) {
+        ALOGV("read");
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
+        if (options) {
+            data.writeByteArray(sizeof(*options), (uint8_t*) options);
+        }
+        status_t ret = remote()->transact(READ, data, &reply);
+        if (ret != NO_ERROR) {
+            return ret;
+        }
+        // wrap the returned data in a MediaBuffer
+        ret = reply.readInt32();
+        int32_t buftype = reply.readInt32();
+        if (buftype == SHARED_BUFFER) {
+            sp<IBinder> remote = reply.readStrongBinder();
+            sp<IBinder> binder = reply.readStrongBinder();
+            sp<IMemory> mem = interface_cast<IMemory>(binder);
+            if (mem == NULL) {
+                ALOGE("received NULL IMemory for shared buffer");
+                // bail out rather than dereference a null IMemory below
+                return UNKNOWN_ERROR;
+            }
+            size_t offset = reply.readInt32();
+            size_t length = reply.readInt32();
+            MediaBuffer *buf = new RemoteMediaBufferWrapper(mem, remote);
+            buf->set_range(offset, length);
+            buf->meta_data()->updateFromParcel(reply);
+            *buffer = buf;
+        } else if (buftype == NULL_BUFFER) {
+            ALOGV("got status %d and NULL buffer", ret);
+            *buffer = NULL;
+        } else {
+            int32_t len = reply.readInt32();
+            ALOGV("got status %d and len %d", ret, len);
+            *buffer = new MediaBuffer(len);
+            reply.read((*buffer)->data(), len);
+            (*buffer)->meta_data()->updateFromParcel(reply);
+        }
+        return ret;
+    }
+
+    virtual status_t readMultiple(Vector<MediaBuffer *> *buffers, uint32_t maxNumBuffers) {
+        ALOGV("readMultiple");
+        if (buffers == NULL || !buffers->isEmpty()) {
+            return BAD_VALUE;
+        }
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
+        data.writeUint32(maxNumBuffers);
+        status_t ret = remote()->transact(READMULTIPLE, data, &reply);
+        if (ret != NO_ERROR) {
+            return ret;
+        }
+        // wrap the returned data in a vector of MediaBuffers
+        int32_t bufCount = 0;
+        while (1) {
+            if (reply.readInt32() == 0) {
+                break;
+            }
+            int32_t len = reply.readInt32();
+            ALOGV("got len %d", len);
+            MediaBuffer *buf = new MediaBuffer(len);
+            reply.read(buf->data(), len);
+            buf->meta_data()->updateFromParcel(reply);
+            buffers->push_back(buf);
+            ++bufCount;
+        }
+        ret = reply.readInt32();
+        ALOGV("got status %d, bufCount %d", ret, bufCount);
+        return ret;
+    }
+
+    virtual status_t pause() {
+        ALOGV("pause");
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
+        return remote()->transact(PAUSE, data, &reply);
+    }
+
+    virtual status_t setBuffers(const Vector<MediaBuffer *> & buffers __unused) {
+        ALOGV("setBuffers NOT IMPLEMENTED");
+        return ERROR_UNSUPPORTED; // default
+    }
+
+private:
+    // NuPlayer passes pointers-to-metadata around, so we use this to keep the metadata alive
+    // XXX: could we use this for caching, or does metadata change on the fly?
+    sp<MetaData> mMetaData;
+
+};
+
+IMPLEMENT_META_INTERFACE(MediaSource, "android.media.IMediaSource");
+
+#undef LOG_TAG
+#define LOG_TAG "BnMediaSource"
+
+BnMediaSource::BnMediaSource()
+    : mGroup(NULL) {
+}
+
+BnMediaSource::~BnMediaSource() {
+    delete mGroup;
+    mGroup = NULL;
+}
+
+status_t BnMediaSource::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch (code) {
+        case START: {
+            ALOGV("start");
+            CHECK_INTERFACE(IMediaSource, data, reply);
+            sp<MetaData> meta;
+            if (data.dataAvail()) {
+                meta = MetaData::createFromParcel(data);
+            }
+            status_t ret = start(meta.get());
+            if (ret == NO_ERROR && meta != NULL) {
+                meta->writeToParcel(*reply);
+            }
+            return ret;
+        }
+        case STOP: {
+            ALOGV("stop");
+            CHECK_INTERFACE(IMediaSource, data, reply);
+            return stop();
+        }
+        case PAUSE: {
+            ALOGV("pause");
+            CHECK_INTERFACE(IMediaSource, data, reply);
+            return pause();
+        }
+        case GETFORMAT: {
+            ALOGV("getFormat");
+            CHECK_INTERFACE(IMediaSource, data, reply);
+            sp<MetaData> meta = getFormat();
+            if (meta != NULL) {
+                meta->writeToParcel(*reply);
+                return NO_ERROR;
+            }
+            return UNKNOWN_ERROR;
+        }
+        case READ: {
+            ALOGV("read");
+            CHECK_INTERFACE(IMediaSource, data, reply);
+            status_t ret;
+            MediaBuffer *buf = NULL;
+            ReadOptions opts;
+            uint32_t len;
+            if (data.readUint32(&len) == NO_ERROR &&
+                    len == sizeof(opts) && data.read((void*)&opts, len) == NO_ERROR) {
+                ret = read(&buf, &opts);
+            } else {
+                ret = read(&buf, NULL);
+            }
+
+            reply->writeInt32(ret);
+            if (buf != NULL) {
+                size_t usedSize = buf->range_length();
+                // Even when shared memory is available we might not want to use it: for
+                // small payloads it's faster to copy the data through the Binder
+                // transaction itself. For large enough payloads shared memory is better,
+                // and very large payloads exceed what Binder can carry at all.
+                if (usedSize >= MediaBuffer::kSharedMemThreshold) {
+                    ALOGV("use shared memory: %zu", usedSize);
+
+                    MediaBuffer *transferBuf = buf;
+                    size_t offset = buf->range_offset();
+                    if (transferBuf->mMemory == NULL) {
+                        if (mGroup == NULL) {
+                            mGroup = new MediaBufferGroup;
+                            size_t allocateSize = usedSize;
+                            if (usedSize < SIZE_MAX / 3) {
+                                allocateSize = usedSize * 3 / 2;
+                            }
+                            mGroup->add_buffer(new MediaBuffer(allocateSize));
+                        }
+
+                        MediaBuffer *newBuf = NULL;
+                        ret = mGroup->acquire_buffer(
+                                &newBuf, false /* nonBlocking */, usedSize);
+                        if (ret != OK || newBuf == NULL || newBuf->mMemory == NULL) {
+                            ALOGW("failed to acquire shared memory, ret %d", ret);
+                            buf->release();
+                            if (newBuf != NULL) {
+                                newBuf->release();
+                            }
+                            reply->writeInt32(NULL_BUFFER);
+                            return NO_ERROR;
+                        }
+                        transferBuf = newBuf;
+                        memcpy(transferBuf->data(), (uint8_t*)buf->data() + buf->range_offset(),
+                                buf->range_length());
+                        offset = 0;
+                    }
+
+                    reply->writeInt32(SHARED_BUFFER);
+                    RemoteMediaBufferReleaser *wrapper =
+                        new RemoteMediaBufferReleaser(transferBuf, this);
+                    reply->writeStrongBinder(wrapper);
+                    reply->writeStrongBinder(IInterface::asBinder(transferBuf->mMemory));
+                    reply->writeInt32(offset);
+                    reply->writeInt32(usedSize);
+                    buf->meta_data()->writeToParcel(*reply);
+                    if (buf->mMemory == NULL) {
+                        buf->release();
+                    }
+                } else {
+                    // buffer is small: copy it
+                    if (buf->mMemory != NULL) {
+                        ALOGV("%zu shared mem available, but only %zu used", buf->mMemory->size(), buf->range_length());
+                    }
+                    reply->writeInt32(INLINE_BUFFER);
+                    reply->writeByteArray(buf->range_length(), (uint8_t*)buf->data() + buf->range_offset());
+                    buf->meta_data()->writeToParcel(*reply);
+                    buf->release();
+                }
+            } else {
+                ALOGV("ret %d, buf %p", ret, buf);
+                reply->writeInt32(NULL_BUFFER);
+            }
+            return NO_ERROR;
+        }
+        case READMULTIPLE: {
+            ALOGV("readmultiple");
+            CHECK_INTERFACE(IMediaSource, data, reply);
+            uint32_t maxNumBuffers = 0;  // stays 0 if the parcel is short
+            data.readUint32(&maxNumBuffers);
+            status_t ret = NO_ERROR;
+            uint32_t bufferCount = 0;
+            if (maxNumBuffers > kMaxNumReadMultiple) {
+                maxNumBuffers = kMaxNumReadMultiple;
+            }
+            while (bufferCount < maxNumBuffers) {
+                if (reply->dataSize() >= MediaBuffer::kSharedMemThreshold) {
+                    break;
+                }
+
+                MediaBuffer *buf = NULL;
+                ret = read(&buf, NULL);
+                if (ret != NO_ERROR || buf == NULL) {
+                    break;
+                }
+                ++bufferCount;
+                reply->writeInt32(1);  // indicate one more MediaBuffer.
+                reply->writeByteArray(
+                        buf->range_length(), (uint8_t*)buf->data() + buf->range_offset());
+                buf->meta_data()->writeToParcel(*reply);
+                buf->release();
+            }
+            reply->writeInt32(0);  // indicate no more MediaBuffer.
+            reply->writeInt32(ret);
+            return NO_ERROR;
+        }
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+IMediaSource::ReadOptions::ReadOptions() {
+    reset();
+}
+
+void IMediaSource::ReadOptions::reset() {
+    mOptions = 0;
+    mSeekTimeUs = 0;
+    mLatenessUs = 0;
+    mNonBlocking = false;
+}
+
+void IMediaSource::ReadOptions::setNonBlocking() {
+    mNonBlocking = true;
+}
+
+void IMediaSource::ReadOptions::clearNonBlocking() {
+    mNonBlocking = false;
+}
+
+bool IMediaSource::ReadOptions::getNonBlocking() const {
+    return mNonBlocking;
+}
+
+void IMediaSource::ReadOptions::setSeekTo(int64_t time_us, SeekMode mode) {
+    mOptions |= kSeekTo_Option;
+    mSeekTimeUs = time_us;
+    mSeekMode = mode;
+}
+
+void IMediaSource::ReadOptions::clearSeekTo() {
+    mOptions &= ~kSeekTo_Option;
+    mSeekTimeUs = 0;
+    mSeekMode = SEEK_CLOSEST_SYNC;
+}
+
+bool IMediaSource::ReadOptions::getSeekTo(
+        int64_t *time_us, SeekMode *mode) const {
+    *time_us = mSeekTimeUs;
+    *mode = mSeekMode;
+    return (mOptions & kSeekTo_Option) != 0;
+}
+
+void IMediaSource::ReadOptions::setLateBy(int64_t lateness_us) {
+    mLatenessUs = lateness_us;
+}
+
+int64_t IMediaSource::ReadOptions::getLateBy() const {
+    return mLatenessUs;
+}
+
+
+}  // namespace android
+
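
Putting the pieces above together: a small buffer comes back inline as a byte array, while a large one arrives as an IMemory plus a binder to the server-side RemoteMediaBufferReleaser; releasing the consumer's wrapper fires the RELEASE_BUFFER transaction that drops the server's reference. A minimal consumer sketch under those assumptions (readOne and its processing step are illustrative):

    static status_t readOne(const sp<IMediaSource> &source) {
        MediaBuffer *buffer = NULL;
        status_t err = source->read(&buffer, NULL /* options */);
        if (err != OK || buffer == NULL) {
            return err;
        }
        // process buffer->range_length() bytes starting at
        // (uint8_t *)buffer->data() + buffer->range_offset()
        //
        // For an inline buffer release() just frees local memory; for a
        // shared buffer the RemoteMediaBufferWrapper dtor also transacts
        // RELEASE_BUFFER so the server side releases promptly.
        buffer->release();
        return OK;
    }
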
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index b082fe4..8ebb355 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -25,6 +25,7 @@
 #include <media/IOMX.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/openmax/OMX_IndexExt.h>
+#include <utils/NativeHandle.h>
 
 namespace android {
 
@@ -40,7 +41,7 @@
     GET_CONFIG,
     SET_CONFIG,
     GET_STATE,
-    ENABLE_GRAPHIC_BUFFERS,
+    ENABLE_NATIVE_BUFFERS,
     USE_BUFFER,
     USE_GRAPHIC_BUFFER,
     CREATE_INPUT_SURFACE,
@@ -49,7 +50,7 @@
     SIGNAL_END_OF_INPUT_STREAM,
     STORE_META_DATA_IN_BUFFERS,
     PREPARE_FOR_ADAPTIVE_PLAYBACK,
-    ALLOC_BUFFER,
+    ALLOC_SECURE_BUFFER,
     ALLOC_BUFFER_WITH_BACKUP,
     FREE_BUFFER,
     FILL_BUFFER,
@@ -60,6 +61,7 @@
     SET_INTERNAL_OPTION,
     UPDATE_GRAPHIC_BUFFER_IN_META,
     CONFIGURE_VIDEO_TUNNEL_MODE,
+    UPDATE_NATIVE_HANDLE_IN_META,
 };
 
 class BpOMX : public BpInterface<IOMX> {
@@ -101,7 +103,9 @@
     }
 
     virtual status_t allocateNode(
-            const char *name, const sp<IOMXObserver> &observer, node_id *node) {
+            const char *name, const sp<IOMXObserver> &observer,
+            sp<IBinder> *nodeBinder,
+            node_id *node) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeCString(name);
@@ -111,6 +115,9 @@
         status_t err = reply.readInt32();
         if (err == OK) {
             *node = (node_id)reply.readInt32();
+            if (nodeBinder != NULL) {
+                *nodeBinder = remote();
+            }
         } else {
             *node = 0;
         }
@@ -220,14 +227,15 @@
         return reply.readInt32();
     }
 
-    virtual status_t enableGraphicBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable) {
+    virtual status_t enableNativeBuffers(
+            node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeInt32((int32_t)node);
         data.writeInt32(port_index);
+        data.writeInt32((uint32_t)graphic);
         data.writeInt32((uint32_t)enable);
-        remote()->transact(ENABLE_GRAPHIC_BUFFERS, data, &reply);
+        remote()->transact(ENABLE_NATIVE_BUFFERS, data, &reply);
 
         status_t err = reply.readInt32();
         return err;
@@ -307,14 +315,33 @@
         return err;
     }
 
-    virtual status_t createInputSurface(
+    virtual status_t updateNativeHandleInMeta(
             node_id node, OMX_U32 port_index,
+            const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        data.writeInt32((int32_t)node);
+        data.writeInt32(port_index);
+        data.writeInt32(nativeHandle != NULL);
+        if (nativeHandle != NULL) {
+            data.writeNativeHandle(nativeHandle->handle());
+        }
+        data.writeInt32((int32_t)buffer);
+        remote()->transact(UPDATE_NATIVE_HANDLE_IN_META, data, &reply);
+
+        status_t err = reply.readInt32();
+        return err;
+    }
+
+    virtual status_t createInputSurface(
+            node_id node, OMX_U32 port_index, android_dataspace dataSpace,
             sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
         Parcel data, reply;
         status_t err;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeInt32((int32_t)node);
         data.writeInt32(port_index);
+        data.writeInt32(dataSpace);
         err = remote()->transact(CREATE_INPUT_SURFACE, data, &reply);
         if (err != OK) {
             ALOGW("binder transaction failed: %d", err);
@@ -409,7 +436,9 @@
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeInt32((int32_t)node);
         data.writeInt32(port_index);
-        data.writeInt32((uint32_t)enable);
+        data.writeInt32((int32_t)enable);
+        data.writeInt32(type == NULL ? kMetadataBufferTypeANWBuffer : *type);
+
         remote()->transact(STORE_META_DATA_IN_BUFFERS, data, &reply);
 
         // read type even storeMetaDataInBuffers failed
@@ -456,26 +485,32 @@
     }
 
 
-    virtual status_t allocateBuffer(
+    virtual status_t allocateSecureBuffer(
             node_id node, OMX_U32 port_index, size_t size,
-            buffer_id *buffer, void **buffer_data) {
+            buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeInt32((int32_t)node);
         data.writeInt32(port_index);
         data.writeInt64(size);
-        remote()->transact(ALLOC_BUFFER, data, &reply);
+        remote()->transact(ALLOC_SECURE_BUFFER, data, &reply);
 
         status_t err = reply.readInt32();
         if (err != OK) {
             *buffer = 0;
-
+            *buffer_data = NULL;
+            *native_handle = NULL;
             return err;
         }
 
         *buffer = (buffer_id)reply.readInt32();
         *buffer_data = (void *)reply.readInt64();
-
+        if (*buffer_data == NULL) {
+            *native_handle = NativeHandle::create(
+                    reply.readNativeHandle(), true /* ownsHandle */);
+        } else {
+            *native_handle = NULL;
+        }
         return err;
     }
 
@@ -645,9 +680,16 @@
             sp<IOMXObserver> observer =
                 interface_cast<IOMXObserver>(data.readStrongBinder());
 
+            if (name == NULL || observer == NULL) {
+                ALOGE("b/26392700");
+                reply->writeInt32(INVALID_OPERATION);
+                return NO_ERROR;
+            }
+
             node_id node;
 
-            status_t err = allocateNode(name, observer, &node);
+            status_t err = allocateNode(name, observer,
+                    NULL /* nodeBinder */, &node);
             reply->writeInt32(err);
             if (err == OK) {
                 reply->writeInt32((int32_t)node);
@@ -794,15 +836,16 @@
             return NO_ERROR;
         }
 
-        case ENABLE_GRAPHIC_BUFFERS:
+        case ENABLE_NATIVE_BUFFERS:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
 
             node_id node = (node_id)data.readInt32();
             OMX_U32 port_index = data.readInt32();
+            OMX_BOOL graphic = (OMX_BOOL)data.readInt32();
             OMX_BOOL enable = (OMX_BOOL)data.readInt32();
 
-            status_t err = enableGraphicBuffers(node, port_index, enable);
+            status_t err = enableNativeBuffers(node, port_index, graphic, enable);
             reply->writeInt32(err);
 
             return NO_ERROR;
@@ -833,6 +876,12 @@
                 interface_cast<IMemory>(data.readStrongBinder());
             OMX_U32 allottedSize = data.readInt32();
 
+            if (params == NULL) {
+                ALOGE("b/26392700");
+                reply->writeInt32(INVALID_OPERATION);
+                return NO_ERROR;
+            }
+
             buffer_id buffer;
             status_t err = useBuffer(node, port_index, params, &buffer, allottedSize);
             reply->writeInt32(err);
@@ -882,16 +931,36 @@
             return NO_ERROR;
         }
 
+        case UPDATE_NATIVE_HANDLE_IN_META:
+        {
+            CHECK_OMX_INTERFACE(IOMX, data, reply);
+
+            node_id node = (node_id)data.readInt32();
+            OMX_U32 port_index = data.readInt32();
+            native_handle *handle = NULL;
+            if (data.readInt32()) {
+                handle = data.readNativeHandle();
+            }
+            buffer_id buffer = (buffer_id)data.readInt32();
+
+            status_t err = updateNativeHandleInMeta(
+                    node, port_index, NativeHandle::create(handle, true /* ownsHandle */), buffer);
+            reply->writeInt32(err);
+
+            return NO_ERROR;
+        }
+
         case CREATE_INPUT_SURFACE:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
 
             node_id node = (node_id)data.readInt32();
             OMX_U32 port_index = data.readInt32();
+            android_dataspace dataSpace = (android_dataspace)data.readInt32();
 
             sp<IGraphicBufferProducer> bufferProducer;
             MetadataBufferType type = kMetadataBufferTypeInvalid;
-            status_t err = createInputSurface(node, port_index, &bufferProducer, &type);
+            status_t err = createInputSurface(node, port_index, dataSpace, &bufferProducer, &type);
 
             if ((err != OK) && (type == kMetadataBufferTypeInvalid)) {
                 android_errorWriteLog(0x534e4554, "26324358");
@@ -937,10 +1006,16 @@
                     interface_cast<IGraphicBufferConsumer>(data.readStrongBinder());
 
             MetadataBufferType type = kMetadataBufferTypeInvalid;
-            status_t err = setInputSurface(node, port_index, bufferConsumer, &type);
 
-            if ((err != OK) && (type == kMetadataBufferTypeInvalid)) {
-                android_errorWriteLog(0x534e4554, "26324358");
+            status_t err = INVALID_OPERATION;
+            if (bufferConsumer == NULL) {
+                ALOGE("b/26392700");
+            } else {
+                err = setInputSurface(node, port_index, bufferConsumer, &type);
+
+                if ((err != OK) && (type == kMetadataBufferTypeInvalid)) {
+                   android_errorWriteLog(0x534e4554, "26324358");
+                }
             }
 
             reply->writeInt32(type);
@@ -968,7 +1043,7 @@
             OMX_U32 port_index = data.readInt32();
             OMX_BOOL enable = (OMX_BOOL)data.readInt32();
 
-            MetadataBufferType type = kMetadataBufferTypeInvalid;
+            MetadataBufferType type = (MetadataBufferType)data.readInt32();
             status_t err = storeMetaDataInBuffers(node, port_index, enable, &type);
 
             reply->writeInt32(type);
@@ -1014,7 +1089,7 @@
             return NO_ERROR;
         }
 
-        case ALLOC_BUFFER:
+        case ALLOC_SECURE_BUFFER:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
 
@@ -1029,14 +1104,18 @@
             size_t size = data.readInt64();
 
             buffer_id buffer;
-            void *buffer_data;
-            status_t err = allocateBuffer(
-                    node, port_index, size, &buffer, &buffer_data);
+            void *buffer_data = NULL;
+            sp<NativeHandle> native_handle;
+            status_t err = allocateSecureBuffer(
+                    node, port_index, size, &buffer, &buffer_data, &native_handle);
             reply->writeInt32(err);
 
             if (err == OK) {
                 reply->writeInt32((int32_t)buffer);
                 reply->writeInt64((uintptr_t)buffer_data);
+                if (buffer_data == NULL) {
+                    reply->writeNativeHandle(native_handle == NULL ? NULL : native_handle->handle());
+                }
             }
 
             return NO_ERROR;
@@ -1052,6 +1131,12 @@
                 interface_cast<IMemory>(data.readStrongBinder());
             OMX_U32 allottedSize = data.readInt32();
 
+            if (params == NULL) {
+                ALOGE("b/26392700");
+                reply->writeInt32(INVALID_OPERATION);
+                return NO_ERROR;
+            }
+
             buffer_id buffer;
             status_t err = allocateBufferWithBackup(
                     node, port_index, params, &buffer, allottedSize);
@@ -1115,6 +1200,12 @@
             node_id node = (node_id)data.readInt32();
             const char *parameter_name = data.readCString();
 
+            if (parameter_name == NULL) {
+                ALOGE("b/26392700");
+                reply->writeInt32(INVALID_OPERATION);
+                return NO_ERROR;
+            }
+
             OMX_INDEXTYPE index;
             status_t err = getExtensionIndex(node, parameter_name, &index);
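
The reworked allocateSecureBuffer() returns either a mappable pointer or a native handle, never both: the proxy above only reads a handle back when buffer_data comes in NULL. A hedged caller sketch (allocOne and its parameters are placeholders):

    static status_t allocOne(const sp<IOMX> &omx, IOMX::node_id node,
            OMX_U32 portIndex, size_t size) {
        IOMX::buffer_id buffer;
        void *bufferData = NULL;
        sp<NativeHandle> handle;
        status_t err = omx->allocateSecureBuffer(
                node, portIndex, size, &buffer, &bufferData, &handle);
        if (err != OK) {
            return err;
        }
        if (bufferData != NULL) {
            // non-secure case: the buffer is directly addressable
        } else {
            // secure case: only the native handle is available and the
            // contents are not mappable from this process
        }
        return OK;
    }
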
 
diff --git a/media/libmedia/IResourceManagerService.cpp b/media/libmedia/IResourceManagerService.cpp
index 4598686..6cb4440 100644
--- a/media/libmedia/IResourceManagerService.cpp
+++ b/media/libmedia/IResourceManagerService.cpp
@@ -132,6 +132,9 @@
             int64_t clientId = data.readInt64();
             sp<IResourceManagerClient> client(
                     interface_cast<IResourceManagerClient>(data.readStrongBinder()));
+            if (client == NULL) {
+                return NO_ERROR;
+            }
             Vector<MediaResource> resources;
             readFromParcel(data, &resources);
             addResource(pid, clientId, client, resources);
diff --git a/media/libmedia/IStreamSource.cpp b/media/libmedia/IStreamSource.cpp
index 840e453..8c0905c 100644
--- a/media/libmedia/IStreamSource.cpp
+++ b/media/libmedia/IStreamSource.cpp
@@ -111,7 +111,11 @@
                 sp<IMemory> mem =
                     interface_cast<IMemory>(data.readStrongBinder());
 
-                buffers.push(mem);
+                if (mem != NULL) {
+                    buffers.push(mem);
+                } else if (data.dataAvail() == 0) {
+                    break;
+                }
             }
             setBuffers(buffers);
             break;
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 8d3fa7b..1b3b3eb 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -26,8 +26,6 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <binder/Parcel.h>
 
-#include <media/stagefright/OMXCodec.h>
-
 namespace android {
 
 void MediaCodecInfo::Capabilities::getSupportedProfileLevels(
@@ -77,6 +75,8 @@
     }
     uint32_t flags = static_cast<uint32_t>(parcel.readInt32());
     sp<AMessage> details = AMessage::FromParcel(parcel);
+    if (details == NULL) {
+        return NULL;
+    }
     if (caps != NULL) {
         caps->mFlags = flags;
         caps->mDetails = details;
@@ -101,6 +101,21 @@
     return OK;
 }
 
+void MediaCodecInfo::CapabilitiesBuilder::addProfileLevel(uint32_t profile, uint32_t level) {
+    ProfileLevel profileLevel;
+    profileLevel.mProfile = profile;
+    profileLevel.mLevel = level;
+    mProfileLevels.push_back(profileLevel);
+}
+
+void MediaCodecInfo::CapabilitiesBuilder::addColorFormat(uint32_t format) {
+    mColorFormats.push(format);
+}
+
+void MediaCodecInfo::CapabilitiesBuilder::addFlags(uint32_t flags) {
+    mFlags |= flags;
+}
+
 bool MediaCodecInfo::isEncoder() const {
     return mIsEncoder;
 }
@@ -150,6 +165,8 @@
     for (size_t i = 0; i < size; i++) {
         AString mime = AString::FromParcel(parcel);
         sp<Capabilities> caps = Capabilities::FromParcel(parcel);
+        if (caps == NULL) {
+            return NULL;
+        }
         if (info != NULL) {
             info->mCaps.add(mime, caps);
         }
@@ -225,26 +242,15 @@
     }
 }
 
-status_t MediaCodecInfo::initializeCapabilities(const CodecCapabilities &caps) {
-    mCurrentCaps->mProfileLevels.clear();
+status_t MediaCodecInfo::initializeCapabilities(const sp<Capabilities> &caps) {
+    // TRICKY: copy data to mCurrentCaps as it is a reference to
+    // an element of the capabilities map.
     mCurrentCaps->mColorFormats.clear();
-
-    for (size_t i = 0; i < caps.mProfileLevels.size(); ++i) {
-        const CodecProfileLevel &src = caps.mProfileLevels.itemAt(i);
-
-        ProfileLevel profileLevel;
-        profileLevel.mProfile = src.mProfile;
-        profileLevel.mLevel = src.mLevel;
-        mCurrentCaps->mProfileLevels.push_back(profileLevel);
-    }
-
-    for (size_t i = 0; i < caps.mColorFormats.size(); ++i) {
-        mCurrentCaps->mColorFormats.push_back(caps.mColorFormats.itemAt(i));
-    }
-
-    mCurrentCaps->mFlags = caps.mFlags;
-    mCurrentCaps->mDetails = new AMessage;
-
+    mCurrentCaps->mColorFormats.appendVector(caps->mColorFormats);
+    mCurrentCaps->mProfileLevels.clear();
+    mCurrentCaps->mProfileLevels.appendVector(caps->mProfileLevels);
+    mCurrentCaps->mFlags = caps->mFlags;
+    mCurrentCaps->mDetails = caps->mDetails;
     return OK;
 }
 
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index c5790fb..ff0e52e 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -23,7 +23,7 @@
 #include <utils/Log.h>
 #include <utils/Vector.h>
 #include <cutils/properties.h>
-#include <libexpat/expat.h>
+#include <expat.h>
 #include <media/MediaProfiles.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <OMX_Video.h>
@@ -37,7 +37,8 @@
 const MediaProfiles::NameToTagMap MediaProfiles::sVideoEncoderNameMap[] = {
     {"h263", VIDEO_ENCODER_H263},
     {"h264", VIDEO_ENCODER_H264},
-    {"m4v",  VIDEO_ENCODER_MPEG_4_SP}
+    {"m4v",  VIDEO_ENCODER_MPEG_4_SP},
+    {"hevc", VIDEO_ENCODER_HEVC}
 };
 
 const MediaProfiles::NameToTagMap MediaProfiles::sAudioEncoderNameMap[] = {
diff --git a/media/libmedia/MediaResource.cpp b/media/libmedia/MediaResource.cpp
index 40ec0cb..e636a50 100644
--- a/media/libmedia/MediaResource.cpp
+++ b/media/libmedia/MediaResource.cpp
@@ -21,38 +21,36 @@
 
 namespace android {
 
-const char kResourceSecureCodec[] = "secure-codec";
-const char kResourceNonSecureCodec[] = "non-secure-codec";
-const char kResourceAudioCodec[] = "audio-codec";
-const char kResourceVideoCodec[] = "video-codec";
-const char kResourceGraphicMemory[] = "graphic-memory";
+MediaResource::MediaResource()
+        : mType(kUnspecified),
+          mSubType(kUnspecifiedSubType),
+          mValue(0) {}
 
-MediaResource::MediaResource() : mValue(0) {}
-
-MediaResource::MediaResource(String8 type, uint64_t value)
+MediaResource::MediaResource(Type type, uint64_t value)
         : mType(type),
+          mSubType(kUnspecifiedSubType),
           mValue(value) {}
 
-MediaResource::MediaResource(String8 type, String8 subType, uint64_t value)
+MediaResource::MediaResource(Type type, SubType subType, uint64_t value)
         : mType(type),
           mSubType(subType),
           mValue(value) {}
 
 void MediaResource::readFromParcel(const Parcel &parcel) {
-    mType = parcel.readString8();
-    mSubType = parcel.readString8();
+    mType = static_cast<Type>(parcel.readInt32());
+    mSubType = static_cast<SubType>(parcel.readInt32());
     mValue = parcel.readUint64();
 }
 
 void MediaResource::writeToParcel(Parcel *parcel) const {
-    parcel->writeString8(mType);
-    parcel->writeString8(mSubType);
+    parcel->writeInt32(static_cast<int32_t>(mType));
+    parcel->writeInt32(static_cast<int32_t>(mSubType));
     parcel->writeUint64(mValue);
 }
 
 String8 MediaResource::toString() const {
     String8 str;
-    str.appendFormat("%s/%s:%llu", mType.string(), mSubType.string(), (unsigned long long)mValue);
+    str.appendFormat("%s/%s:%llu", asString(mType), asString(mSubType), (unsigned long long)mValue);
     return str;
 }
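
With mType and mSubType now enums, the parcel layout is two int32 values followed by a uint64. A round-trip sketch under that layout (kUnspecified/kUnspecifiedSubType are the only Type/SubType values visible in this hunk; real callers would pass concrete resource types):

    MediaResource res(MediaResource::kUnspecified,
                      MediaResource::kUnspecifiedSubType, 42 /* value */);
    Parcel p;
    res.writeToParcel(&p);   // int32 type, int32 subtype, uint64 value
    p.setDataPosition(0);
    MediaResource back;
    back.readFromParcel(p);
    ALOGV("%s", back.toString().string());
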
 
diff --git a/media/libmedia/MemoryLeakTrackUtil.cpp b/media/libmedia/MemoryLeakTrackUtil.cpp
index 554dbae..18f5f25 100644
--- a/media/libmedia/MemoryLeakTrackUtil.cpp
+++ b/media/libmedia/MemoryLeakTrackUtil.cpp
@@ -14,166 +14,84 @@
  * limitations under the License.
  */
 
-#include <media/MemoryLeakTrackUtil.h>
 
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MemoryLeakTrackUtil"
+#include <utils/Log.h>
+
+#include "media/MemoryLeakTrackUtil.h"
+#include <sstream>
 
 /*
- * The code here originally resided in MediaPlayerService.cpp and was
- * shamelessly copied over to support memory leak tracking from
- * multiple places.
+ * The code here originally resided in MediaPlayerService.cpp
  */
-namespace android {
 
+// Figure out the ABI from the compiler-defined macros.
 #if defined(__arm__)
+#define ABI_STRING "arm"
+#elif defined(__aarch64__)
+#define ABI_STRING "arm64"
+#elif defined(__mips__) && !defined(__LP64__)
+#define ABI_STRING "mips"
+#elif defined(__mips__) && defined(__LP64__)
+#define ABI_STRING "mips64"
+#elif defined(__i386__)
+#define ABI_STRING "x86"
+#elif defined(__x86_64__)
+#define ABI_STRING "x86_64"
+#else
+#error "Unsupported ABI"
+#endif
+
+extern std::string backtrace_string(const uintptr_t* frames, size_t frame_count);
+
+namespace android {
 
 extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
         size_t* infoSize, size_t* totalMemory, size_t* backtraceSize);
 
 extern "C" void free_malloc_leak_info(uint8_t* info);
 
-// Use the String-class below instead of String8 to allocate all memory
-// beforehand and not reenter the heap while we are examining it...
-struct MyString8 {
-    static const size_t MAX_SIZE = 256 * 1024;
-
-    MyString8()
-        : mPtr((char *)malloc(MAX_SIZE)) {
-        *mPtr = '\0';
-    }
-
-    ~MyString8() {
-        free(mPtr);
-    }
-
-    void append(const char *s) {
-        strncat(mPtr, s, MAX_SIZE - size() - 1);
-    }
-
-    const char *string() const {
-        return mPtr;
-    }
-
-    size_t size() const {
-        return strlen(mPtr);
-    }
-
-    void clear() {
-        *mPtr = '\0';
-    }
-
-private:
-    char *mPtr;
-
-    MyString8(const MyString8 &);
-    MyString8 &operator=(const MyString8 &);
-};
-
-void dumpMemoryAddresses(int fd)
+std::string dumpMemoryAddresses(size_t limit)
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    MyString8 result;
-
-    typedef struct {
-        size_t size;
-        size_t dups;
-        intptr_t * backtrace;
-    } AllocEntry;
-
-    uint8_t *info = NULL;
-    size_t overallSize = 0;
-    size_t infoSize = 0;
-    size_t totalMemory = 0;
-    size_t backtraceSize = 0;
-
+    uint8_t *info;
+    size_t overallSize;
+    size_t infoSize;
+    size_t totalMemory;
+    size_t backtraceSize;
     get_malloc_leak_info(&info, &overallSize, &infoSize, &totalMemory, &backtraceSize);
-    if (info) {
-        uint8_t *ptr = info;
-        size_t count = overallSize / infoSize;
 
-        snprintf(buffer, SIZE, " Allocation count %i\n", count);
-        result.append(buffer);
-        snprintf(buffer, SIZE, " Total memory %i\n", totalMemory);
-        result.append(buffer);
-
-        AllocEntry * entries = new AllocEntry[count];
-
-        for (size_t i = 0; i < count; i++) {
-            // Each entry should be size_t, size_t, intptr_t[backtraceSize]
-            AllocEntry *e = &entries[i];
-
-            e->size = *reinterpret_cast<size_t *>(ptr);
-            ptr += sizeof(size_t);
-
-            e->dups = *reinterpret_cast<size_t *>(ptr);
-            ptr += sizeof(size_t);
-
-            e->backtrace = reinterpret_cast<intptr_t *>(ptr);
-            ptr += sizeof(intptr_t) * backtraceSize;
-        }
-
-        // Now we need to sort the entries.  They come sorted by size but
-        // not by stack trace which causes problems using diff.
-        bool moved;
-        do {
-            moved = false;
-            for (size_t i = 0; i < (count - 1); i++) {
-                AllocEntry *e1 = &entries[i];
-                AllocEntry *e2 = &entries[i+1];
-
-                bool swap = e1->size < e2->size;
-                if (e1->size == e2->size) {
-                    for(size_t j = 0; j < backtraceSize; j++) {
-                        if (e1->backtrace[j] == e2->backtrace[j]) {
-                            continue;
-                        }
-                        swap = e1->backtrace[j] < e2->backtrace[j];
-                        break;
-                    }
-                }
-                if (swap) {
-                    AllocEntry t = entries[i];
-                    entries[i] = entries[i+1];
-                    entries[i+1] = t;
-                    moved = true;
-                }
-            }
-        } while (moved);
-
-        write(fd, result.string(), result.size());
-        result.clear();
-
-        for (size_t i = 0; i < count; i++) {
-            AllocEntry *e = &entries[i];
-
-            snprintf(buffer, SIZE, "size %8i, dup %4i, ", e->size, e->dups);
-            result.append(buffer);
-            for (size_t ct = 0; (ct < backtraceSize) && e->backtrace[ct]; ct++) {
-                if (ct) {
-                    result.append(", ");
-                }
-                snprintf(buffer, SIZE, "0x%08x", e->backtrace[ct]);
-                result.append(buffer);
-            }
-            result.append("\n");
-
-            write(fd, result.string(), result.size());
-            result.clear();
-        }
-
-        delete[] entries;
-        free_malloc_leak_info(info);
+    size_t count;
+    if (info == nullptr || overallSize == 0 || infoSize == 0
+            || (count = overallSize / infoSize) == 0) {
+        ALOGD("no malloc info, libc.debug.malloc.program property should be set");
+        return std::string();
     }
+
+    std::ostringstream oss;
+    oss << totalMemory << " bytes in " << count << " allocations\n";
+    oss << "  ABI: '" ABI_STRING "'" << "\n\n";
+    if (count > limit) count = limit;
+
+    // The memory is sorted based on total size which is useful for finding
+    // worst memory offenders. For diffs, sometimes it is preferable to sort
+    // based on the backtrace.
+    for (size_t i = 0; i < count; i++) {
+        struct AllocEntry {
+            size_t size;  // bit 31 is set if this is zygote allocated memory
+            size_t allocations;
+            uintptr_t backtrace[];
+        };
+
+        const AllocEntry * const e = (AllocEntry *)(info + i * infoSize);
+
+        oss << (e->size * e->allocations)
+                << " bytes ( " << e->size << " bytes * " << e->allocations << " allocations )\n";
+        oss << backtrace_string(e->backtrace, backtraceSize) << "\n";
+    }
+    oss << "\n";
+    free_malloc_leak_info(info);
+    return oss.str();
 }
 
-#else
-// Does nothing
-void dumpMemoryAddresses(int fd __unused) {}
-
-#endif
 }  // namespace android
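
Because dumpMemoryAddresses() now returns a std::string instead of writing to a file descriptor, the caller performs the write itself, e.g. inside a service dump handler. A minimal sketch (fd and the limit of 100 entries are placeholders):

    std::string s = dumpMemoryAddresses(100 /* limit */);
    if (!s.empty()) {
        write(fd, s.c_str(), s.size());
    }
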
diff --git a/media/libmedia/MidiIoWrapper.cpp b/media/libmedia/MidiIoWrapper.cpp
index 5197ce2..faae954 100644
--- a/media/libmedia/MidiIoWrapper.cpp
+++ b/media/libmedia/MidiIoWrapper.cpp
@@ -42,7 +42,7 @@
 
 MidiIoWrapper::MidiIoWrapper(int fd, off64_t offset, int64_t size) {
     ALOGV("MidiIoWrapper(fd=%d)", fd);
-    mFd = dup(fd);
+    mFd = fd < 0 ? -1 : dup(fd);
     mBase = offset;
     mLength = size;
 }
@@ -61,7 +61,9 @@
 
 MidiIoWrapper::~MidiIoWrapper() {
     ALOGV("~MidiIoWrapper");
-    close(mFd);
+    if (mFd >= 0) {
+        close(mFd);
+    }
 }
 
 int MidiIoWrapper::readAt(void *buffer, int offset, int size) {
@@ -70,6 +72,10 @@
     if (mDataSource != NULL) {
         return mDataSource->readAt(offset, buffer, size);
     }
+    if (mFd < 0) {
+        errno = EBADF;
+        return -1; // behave like a failed read(2)
+    }
     lseek(mFd, mBase + offset, SEEK_SET);
     if (offset + size > mLength) {
         size = mLength - offset;
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 6da5348..411519d 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -740,6 +740,13 @@
                         { .duration = 0 , .waveFreq = { 0 }, 0, 0}},
           .repeatCnt = ToneGenerator::TONEGEN_INF,
           .repeatSegment = 0 },                              // TONE_JAPAN_RADIO_ACK
+        { .segments = { { .duration = 400, .waveFreq = { 400, 450, 0 }, 0, 0 },
+                        { .duration = 200, .waveFreq = { 0 }, 0, 0 },
+                        { .duration = 400, .waveFreq = { 400, 450, 0 }, 0, 0 },
+                        { .duration = 2000, .waveFreq = { 0 }, 0, 0},
+                        { .duration = 0, .waveFreq = { 0 }, 0, 0}},
+          .repeatCnt = ToneGenerator::TONEGEN_INF,
+          .repeatSegment = 0 },                              // TONE_UK_RINGTONE
 
 
 
@@ -767,7 +774,18 @@
             TONE_SUP_ERROR,              // TONE_SUP_ERROR
             TONE_SUP_CALL_WAITING,       // TONE_SUP_CALL_WAITING
             TONE_SUP_RINGTONE            // TONE_SUP_RINGTONE
+        },
+        {   // UK
+            TONE_SUP_DIAL,               // TONE_SUP_DIAL
+            TONE_SUP_BUSY,               // TONE_SUP_BUSY
+            TONE_SUP_CONGESTION,         // TONE_SUP_CONGESTION
+            TONE_SUP_RADIO_ACK,          // TONE_SUP_RADIO_ACK
+            TONE_SUP_RADIO_NOTAVAIL,     // TONE_SUP_RADIO_NOTAVAIL
+            TONE_SUP_ERROR,              // TONE_SUP_ERROR
+            TONE_SUP_CALL_WAITING,       // TONE_SUP_CALL_WAITING
+            TONE_UK_RINGTONE             // TONE_SUP_RINGTONE
         }
+
 };
 
 
@@ -819,6 +837,9 @@
         mRegion = ANSI;
     } else if (strcmp(value,"jp") == 0) {
         mRegion = JAPAN;
+    } else if (strcmp(value,"uk") == 0 ||
+               strcmp(value,"uk,uk") == 0) {
+        mRegion = UK;
     } else {
         mRegion = CEPT;
     }
@@ -1580,7 +1601,8 @@
         }
         long dec = lAmplitude/count;
         // loop generation
-        while (count--) {
+        while (count) {
+            count--;
             Sample = ((lA1 * lS1) >> S_Q14) - lS2;
             // shift delay
             lS2 = lS1;
@@ -1591,7 +1613,8 @@
         }
     } else {
         // loop generation
-        while (count--) {
+        while (count) {
+            count--;
             Sample = ((lA1 * lS1) >> S_Q14) - lS2;
             // shift delay
             lS2 = lS1;
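
The switch from while (count--) to while (count) { count--; ... } matters because count is unsigned: the post-decrement form leaves count wrapped around to its maximum value once the loop exits, which is unsafe if count is examined afterwards. A tiny illustration of the difference:

    unsigned int count = 3;
    while (count--) { /* body runs 3 times */ }
    // count is now UINT_MAX, not 0

    count = 3;
    while (count) { count--; /* body runs 3 times */ }
    // count is now 0, safe to reuse
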
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index f5c1b1f..31e310b 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -38,7 +38,7 @@
          int32_t priority,
          effect_callback_t cbf,
          void* user,
-         int sessionId)
+         audio_session_t sessionId)
     :   AudioEffect(SL_IID_VISUALIZATION, opPackageName, NULL, priority, cbf, user, sessionId),
         mCaptureRate(CAPTURE_RATE_DEF),
         mCaptureSize(CAPTURE_SIZE_DEF),
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index dcce3cc..8725dfe 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -63,7 +63,7 @@
     mLeftVolume = mRightVolume = 1.0;
     mVideoWidth = mVideoHeight = 0;
     mLockThreadId = 0;
-    mAudioSessionId = AudioSystem::newAudioUniqueId();
+    mAudioSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
     AudioSystem::acquireAudioSessionId(mAudioSessionId, -1);
     mSendLevel = 0;
     mRetransmitEndpointValid = false;
@@ -181,27 +181,11 @@
     return err;
 }
 
-status_t MediaPlayer::setDataSource(const sp<IStreamSource> &source)
-{
-    ALOGV("setDataSource");
-    status_t err = UNKNOWN_ERROR;
-    const sp<IMediaPlayerService> service(getMediaPlayerService());
-    if (service != 0) {
-        sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
-        if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
-            (NO_ERROR != player->setDataSource(source))) {
-            player.clear();
-        }
-        err = attachNewPlayer(player);
-    }
-    return err;
-}
-
 status_t MediaPlayer::setDataSource(const sp<IDataSource> &source)
 {
     ALOGV("setDataSource(IDataSource)");
     status_t err = UNKNOWN_ERROR;
-    const sp<IMediaPlayerService>& service(getMediaPlayerService());
+    const sp<IMediaPlayerService> service(getMediaPlayerService());
     if (service != 0) {
         sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
         if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
@@ -223,7 +207,7 @@
         ALOGV("invoke %zu", request.dataSize());
         return  mPlayer->invoke(request, reply);
     }
-    ALOGE("invoke failed: wrong state %X", mCurrentState);
+    ALOGE("invoke failed: wrong state %X, mPlayer(%p)", mCurrentState, mPlayer.get());
     return INVALID_OPERATION;
 }
 
@@ -268,7 +252,7 @@
         mCurrentState = MEDIA_PLAYER_PREPARING;
         return mPlayer->prepareAsync();
     }
-    ALOGE("prepareAsync called in state %d", mCurrentState);
+    ALOGE("prepareAsync called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
     return INVALID_OPERATION;
 }
 
@@ -334,7 +318,7 @@
             }
         }
     } else {
-        ALOGE("start called in state %d", mCurrentState);
+        ALOGE("start called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
         ret = INVALID_OPERATION;
     }
 
@@ -358,7 +342,7 @@
         }
         return ret;
     }
-    ALOGE("stop called in state %d", mCurrentState);
+    ALOGE("stop called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
     return INVALID_OPERATION;
 }
 
@@ -377,7 +361,7 @@
         }
         return ret;
     }
-    ALOGE("pause called in state %d", mCurrentState);
+    ALOGE("pause called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
     return INVALID_OPERATION;
 }
 
@@ -412,11 +396,22 @@
     }
     Mutex::Autolock _l(mLock);
     if (mPlayer == 0) return INVALID_OPERATION;
+
+    if (rate.mSpeed != 0.f && !(mCurrentState & MEDIA_PLAYER_STARTED)
+            && (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_PAUSED
+                    | MEDIA_PLAYER_PLAYBACK_COMPLETE))) {
+        mPlayer->setLooping(mLoop);
+        mPlayer->setVolume(mLeftVolume, mRightVolume);
+        mPlayer->setAuxEffectSendLevel(mSendLevel);
+    }
+
     status_t err = mPlayer->setPlaybackSettings(rate);
     if (err == OK) {
         if (rate.mSpeed == 0.f && mCurrentState == MEDIA_PLAYER_STARTED) {
             mCurrentState = MEDIA_PLAYER_PAUSED;
-        } else if (rate.mSpeed != 0.f && mCurrentState == MEDIA_PLAYER_PAUSED) {
+        } else if (rate.mSpeed != 0.f
+                && (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_PAUSED
+                    | MEDIA_PLAYER_PLAYBACK_COMPLETE))) {
             mCurrentState = MEDIA_PLAYER_STARTED;
         }
     }
@@ -500,7 +495,8 @@
         }
         return ret;
     }
-    ALOGE("Attempt to call getDuration without a valid mediaplayer");
+    ALOGE("Attempt to call getDuration in wrong state: mPlayer=%p, mCurrentState=%u",
+            mPlayer.get(), mCurrentState);
     return INVALID_OPERATION;
 }
 
@@ -664,7 +660,7 @@
     return OK;
 }
 
-status_t MediaPlayer::setAudioSessionId(int sessionId)
+status_t MediaPlayer::setAudioSessionId(audio_session_t sessionId)
 {
     ALOGV("MediaPlayer::setAudioSessionId(%d)", sessionId);
     Mutex::Autolock _l(mLock);
@@ -683,7 +679,7 @@
     return NO_ERROR;
 }
 
-int MediaPlayer::getAudioSessionId()
+audio_session_t MediaPlayer::getAudioSessionId()
 {
     Mutex::Autolock _l(mLock);
     return mAudioSessionId;
@@ -707,7 +703,7 @@
     if (mPlayer == 0 ||
         (mCurrentState & MEDIA_PLAYER_IDLE) ||
         (mCurrentState == MEDIA_PLAYER_STATE_ERROR )) {
-        ALOGE("attachAuxEffect called in state %d", mCurrentState);
+        ALOGE("attachAuxEffect called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
         return INVALID_OPERATION;
     }
 
@@ -922,6 +918,7 @@
 }
 
 status_t MediaPlayer::setNextMediaPlayer(const sp<MediaPlayer>& next) {
+    Mutex::Autolock _l(mLock);
     if (mPlayer == NULL) {
         return NO_INIT;
     }
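
Note on the setPlaybackSettings() change above: a nonzero speed now doubles as a
start trigger, moving the player from PREPARED, PAUSED, or PLAYBACK_COMPLETE to
STARTED after re-applying looping, volume, and the aux effect send level. A
minimal client-side sketch of that path (field defaults assumed from
media/AudioResamplerPublic.h; error handling elided):

    // Sketch: start a prepared MediaPlayer by requesting a nonzero speed.
    #include <media/mediaplayer.h>
    #include <media/AudioResamplerPublic.h>

    using namespace android;

    status_t startAtSpeed(const sp<MediaPlayer>& mp, float speed) {
        AudioPlaybackRate rate;
        rate.mSpeed = speed;  // nonzero: PREPARED/PAUSED/PLAYBACK_COMPLETE -> STARTED
        rate.mPitch = 1.0f;
        rate.mStretchMode = AUDIO_TIMESTRETCH_STRETCH_DEFAULT;
        rate.mFallbackMode = AUDIO_TIMESTRETCH_FALLBACK_DEFAULT;
        return mp->setPlaybackSettings(rate);
    }
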
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 0eb7de5..59c077a 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -32,7 +32,8 @@
 
 namespace android {
 
-status_t MediaRecorder::setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy)
+status_t MediaRecorder::setCamera(const sp<hardware::ICamera>& camera,
+        const sp<ICameraRecordingProxy>& proxy)
 {
     ALOGV("setCamera(%p,%p)", camera.get(), proxy.get());
     if (mMediaRecorder == NULL) {
@@ -561,6 +562,50 @@
     return ret;
 }
 
+status_t MediaRecorder::pause()
+{
+    ALOGV("pause");
+    if (mMediaRecorder == NULL) {
+        ALOGE("media recorder is not initialized yet");
+        return INVALID_OPERATION;
+    }
+    if (!(mCurrentState & MEDIA_RECORDER_RECORDING)) {
+        ALOGE("stop called in an invalid state: %d", mCurrentState);
+        return INVALID_OPERATION;
+    }
+
+    status_t ret = mMediaRecorder->pause();
+    if (OK != ret) {
+        ALOGE("pause failed: %d", ret);
+        mCurrentState = MEDIA_RECORDER_ERROR;
+        return ret;
+    }
+
+    return ret;
+}
+
+status_t MediaRecorder::resume()
+{
+    ALOGV("resume");
+    if (mMediaRecorder == NULL) {
+        ALOGE("media recorder is not initialized yet");
+        return INVALID_OPERATION;
+    }
+    if (!(mCurrentState & MEDIA_RECORDER_RECORDING)) {
+        ALOGE("resume called in an invalid state: %d", mCurrentState);
+        return INVALID_OPERATION;
+    }
+
+    status_t ret = mMediaRecorder->resume();
+    if (OK != ret) {
+        ALOGE("resume failed: %d", ret);
+        mCurrentState = MEDIA_RECORDER_ERROR;
+        return ret;
+    }
+
+    return ret;
+}
+
 status_t MediaRecorder::close()
 {
     ALOGV("close");
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 4d1b587..8d86366 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -8,17 +8,12 @@
 
 LOCAL_SRC_FILES:=               \
     ActivityManager.cpp         \
-    Crypto.cpp                  \
-    Drm.cpp                     \
-    DrmSessionManager.cpp       \
     HDCP.cpp                    \
     MediaPlayerFactory.cpp      \
     MediaPlayerService.cpp      \
     MediaRecorderClient.cpp     \
     MetadataRetrieverClient.cpp \
     RemoteDisplay.cpp           \
-    SharedLibrary.cpp           \
-    StagefrightPlayer.cpp       \
     StagefrightRecorder.cpp     \
     TestPlayerStub.cpp          \
 
@@ -33,6 +28,7 @@
     libgui                      \
     libmedia                    \
     libmediautils               \
+    libmemunreachable           \
     libsonivox                  \
     libstagefright              \
     libstagefright_foundation   \
@@ -45,14 +41,19 @@
 LOCAL_STATIC_LIBRARIES :=       \
     libstagefright_nuplayer     \
     libstagefright_rtsp         \
+    libstagefright_timedtext    \
 
 LOCAL_C_INCLUDES :=                                                 \
     $(TOP)/frameworks/av/media/libstagefright/include               \
     $(TOP)/frameworks/av/media/libstagefright/rtsp                  \
     $(TOP)/frameworks/av/media/libstagefright/wifi-display          \
     $(TOP)/frameworks/av/media/libstagefright/webm                  \
+    $(TOP)/frameworks/av/include/media                              \
+    $(TOP)/frameworks/av/include/camera                             \
     $(TOP)/frameworks/native/include/media/openmax                  \
+    $(TOP)/frameworks/native/include/media/hardware                 \
     $(TOP)/external/tremolo/Tremolo                                 \
+    libcore/include                                                 \
 
 LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
 LOCAL_CLANG := true
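
The new libmemunreachable dependency backs the --unreachable dump option added
to MediaPlayerService.cpp below. A minimal sketch of the call this links against
(arguments copied from the service dump; treat the namespace placement as an
assumption):

    #include <string>
    #include <memunreachable/memunreachable.h>

    namespace android {

    // Same call the new dump path uses: include allocation contents and cap
    // the report at 10000 entries.
    std::string leakReport() {
        return GetUnreachableMemoryString(true /* contents */, 10000 /* limit */);
    }

    }  // namespace android
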
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index d5d12f7..605c710 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -31,7 +31,6 @@
 #include "MediaPlayerFactory.h"
 
 #include "TestPlayerStub.h"
-#include "StagefrightPlayer.h"
 #include "nuplayer/NuPlayerDriver.h"
 
 namespace android {
@@ -64,12 +63,6 @@
 }
 
 static player_type getDefaultPlayerType() {
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("media.stagefright.use-awesome", value, NULL)
-            && (!strcmp("1", value) || !strcasecmp("true", value))) {
-        return STAGEFRIGHT_PLAYER;
-    }
-
     return NU_PLAYER;
 }
 
@@ -176,63 +169,6 @@
  *                                                                           *
  *****************************************************************************/
 
-class StagefrightPlayerFactory :
-    public MediaPlayerFactory::IFactory {
-  public:
-    virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
-                               int fd,
-                               int64_t offset,
-                               int64_t length,
-                               float /*curScore*/) {
-        if (legacyDrm()) {
-            sp<DataSource> source = new FileSource(dup(fd), offset, length);
-            String8 mimeType;
-            float confidence;
-            if (SniffWVM(source, &mimeType, &confidence, NULL /* format */)) {
-                return 1.0;
-            }
-        }
-
-        if (getDefaultPlayerType() == STAGEFRIGHT_PLAYER) {
-            char buf[20];
-            lseek(fd, offset, SEEK_SET);
-            read(fd, buf, sizeof(buf));
-            lseek(fd, offset, SEEK_SET);
-
-            uint32_t ident = *((uint32_t*)buf);
-
-            // Ogg vorbis?
-            if (ident == 0x5367674f) // 'OggS'
-                return 1.0;
-        }
-
-        return 0.0;
-    }
-
-    virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
-                               const char* url,
-                               float /*curScore*/) {
-        if (legacyDrm() && !strncasecmp("widevine://", url, 11)) {
-            return 1.0;
-        }
-        return 0.0;
-    }
-
-    virtual sp<MediaPlayerBase> createPlayer(pid_t /* pid */) {
-        ALOGV(" create StagefrightPlayer");
-        return new StagefrightPlayer();
-    }
-  private:
-    bool legacyDrm() {
-        char value[PROPERTY_VALUE_MAX];
-        if (property_get("persist.sys.media.legacy-drm", value, NULL)
-                && (!strcmp("1", value) || !strcasecmp("true", value))) {
-            return true;
-        }
-        return false;
-    }
-};
-
 class NuPlayerFactory : public MediaPlayerFactory::IFactory {
   public:
     virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
@@ -310,7 +246,6 @@
     if (sInitComplete)
         return;
 
-    registerFactory_l(new StagefrightPlayerFactory(), STAGEFRIGHT_PLAYER);
     registerFactory_l(new NuPlayerFactory(), NU_PLAYER);
     registerFactory_l(new TestPlayerFactory(), TEST_PLAYER);
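
With StagefrightPlayerFactory gone, selection reduces to the factories
registered above: each scoreFactory() override reports a confidence in
[0.0, 1.0] and the highest scorer creates the player. A sketch of the shape of
such a factory (the scheme, scores, and URL check are illustrative, not part of
this change):

    // Sketch: a factory that claims URLs with a custom scheme.
    #include <strings.h>
    #include "MediaPlayerFactory.h"
    #include "nuplayer/NuPlayerDriver.h"

    namespace android {

    class ExampleUrlFactory : public MediaPlayerFactory::IFactory {
      public:
        virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
                                   const char* url,
                                   float /*curScore*/) {
            // 1.0 claims the source outright; 0.0 defers to other factories.
            return strncasecmp("example://", url, 10) ? 0.0f : 1.0f;
        }

        virtual sp<MediaPlayerBase> createPlayer(pid_t pid) {
            return new NuPlayerDriver(pid);
        }
    };

    }  // namespace android
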
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index b5d1cdb..32f86df 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -57,11 +57,12 @@
 #include <media/MemoryLeakTrackUtil.h>
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/AudioPlayer.h>
+#include <media/stagefright/Utils.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooperRoster.h>
 #include <mediautils/BatteryNotifier.h>
 
+#include <memunreachable/memunreachable.h>
 #include <system/audio.h>
 
 #include <private/android_filesystem_config.h>
@@ -73,13 +74,10 @@
 #include "MediaPlayerFactory.h"
 
 #include "TestPlayerStub.h"
-#include "StagefrightPlayer.h"
 #include "nuplayer/NuPlayerDriver.h"
 
 #include <OMX.h>
 
-#include "Crypto.h"
-#include "Drm.h"
 #include "HDCP.h"
 #include "HTTPBase.h"
 #include "RemoteDisplay.h"
@@ -95,6 +93,8 @@
 // Max number of entries in the filter.
 const int kMaxFilterSize = 64;  // I pulled that out of thin air.
 
+const float kMaxRequiredSpeed = 8.0f; // for PCM tracks allow up to 8x speedup.
+
 // FIXME: Move all the metadata related function in the Metadata.cpp
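
kMaxRequiredSpeed bounds how much buffer headroom the audio output reserves for
fast playback: when the AudioTrack is opened (see the AudioOutput::open()
changes further down), the requested speed is clamped into
[1.0, kMaxRequiredSpeed]. A worked sketch of that clamp:

    #include <algorithm>
    #include <cassert>

    const float kMaxRequiredSpeed = 8.0f;

    float clampTargetSpeed(float speed) {
        return std::min(std::max(speed, 1.0f), kMaxRequiredSpeed);
    }

    int main() {
        assert(clampTargetSpeed(0.5f) == 1.0f);   // slow playback still sized for 1x
        assert(clampTargetSpeed(2.0f) == 2.0f);   // in range: used as-is
        assert(clampTargetSpeed(12.0f) == 8.0f);  // capped at kMaxRequiredSpeed
        return 0;
    }
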
 
 
@@ -148,7 +148,7 @@
 
     if (p.dataAvail() < size)
     {
-        ALOGE("Filter too short expected %d but got %d", size, p.dataAvail());
+        ALOGE("Filter too short expected %zu but got %zu", size, p.dataAvail());
         *status = NOT_ENOUGH_DATA;
         return false;
     }
@@ -254,9 +254,6 @@
 
 
 static bool checkPermission(const char* permissionString) {
-#ifndef HAVE_ANDROID_OS
-    return true;
-#endif
     if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
     bool ok = checkCallingPermission(String16(permissionString));
     if (!ok) ALOGE("Request requires %s", permissionString);
@@ -289,9 +286,7 @@
     // reset battery stats
     // if the mediaserver has crashed, battery stats could be left
     // in bad state, reset the state upon service start.
-    BatteryNotifier& notifier(BatteryNotifier::getInstance());
-    notifier.noteResetVideo();
-    notifier.noteResetAudio();
+    BatteryNotifier::getInstance().noteResetVideo();
 
     MediaPlayerFactory::registerBuiltinFactories();
 }
@@ -328,7 +323,7 @@
 }
 
 sp<IMediaPlayer> MediaPlayerService::create(const sp<IMediaPlayerClient>& client,
-        int audioSessionId)
+        audio_session_t audioSessionId)
 {
     pid_t pid = IPCThreadState::self()->getCallingPid();
     int32_t connId = android_atomic_inc(&mNextConnId);
@@ -353,6 +348,7 @@
 }
 
 sp<IOMX> MediaPlayerService::getOMX() {
+    ALOGI("MediaPlayerService::getOMX");
     Mutex::Autolock autoLock(mLock);
 
     if (mOMX.get() == NULL) {
@@ -362,14 +358,6 @@
     return mOMX;
 }
 
-sp<ICrypto> MediaPlayerService::makeCrypto() {
-    return new Crypto;
-}
-
-sp<IDrm> MediaPlayerService::makeDrm() {
-    return new Drm;
-}
-
 sp<IHDCP> MediaPlayerService::makeHDCP(bool createEncryptionModule) {
     return new HDCP(createEncryptionModule);
 }
@@ -540,13 +528,24 @@
         gLooperRoster.dump(fd, args);
 
         bool dumpMem = false;
+        bool unreachableMemory = false;
         for (size_t i = 0; i < args.size(); i++) {
             if (args[i] == String16("-m")) {
                 dumpMem = true;
+            } else if (args[i] == String16("--unreachable")) {
+                unreachableMemory = true;
             }
         }
         if (dumpMem) {
-            dumpMemoryAddresses(fd);
+            result.append("\nDumping memory:\n");
+            std::string s = dumpMemoryAddresses(100 /* limit */);
+            result.append(s.c_str(), s.size());
+        }
+        if (unreachableMemory) {
+            result.append("\nDumping unreachable memory:\n");
+            // TODO: should the limit be a dump argument?
+            std::string s = GetUnreachableMemoryString(true /* contents */, 10000 /* limit */);
+            result.append(s.c_str(), s.size());
         }
     }
     write(fd, result.string(), result.size());
@@ -562,7 +561,7 @@
 MediaPlayerService::Client::Client(
         const sp<MediaPlayerService>& service, pid_t pid,
         int32_t connId, const sp<IMediaPlayerClient>& client,
-        int audioSessionId, uid_t uid)
+        audio_session_t audioSessionId, uid_t uid)
 {
     ALOGV("Client(%d) constructor", connId);
     mPid = pid;
@@ -644,6 +643,28 @@
     return p;
 }
 
+MediaPlayerService::Client::ServiceDeathNotifier::ServiceDeathNotifier(
+        const sp<IBinder>& service,
+        const sp<MediaPlayerBase>& listener,
+        int which) {
+    mService = service;
+    mListener = listener;
+    mWhich = which;
+}
+
+MediaPlayerService::Client::ServiceDeathNotifier::~ServiceDeathNotifier() {
+    mService->unlinkToDeath(this);
+}
+
+void MediaPlayerService::Client::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
+    sp<MediaPlayerBase> listener = mListener.promote();
+    if (listener != NULL) {
+        listener->sendEvent(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
+    } else {
+        ALOGW("listener for process %d death is gone", mWhich);
+    }
+}
+
 sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
         player_type playerType)
 {
@@ -655,6 +676,15 @@
         return p;
     }
 
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.extractor"));
+    mExtractorDeathListener = new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
+    binder->linkToDeath(mExtractorDeathListener);
+
+    binder = sm->getService(String16("media.codec"));
+    mCodecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
+    binder->linkToDeath(mCodecDeathListener);
+
     if (!p->hardwareOutput()) {
         Mutex::Autolock l(mLock);
         mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(),
@@ -734,7 +764,8 @@
 
 status_t MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64_t length)
 {
-    ALOGV("setDataSource fd=%d, offset=%lld, length=%lld", fd, offset, length);
+    ALOGV("setDataSource fd=%d (%s), offset=%lld, length=%lld",
+            fd, nameForFd(fd).c_str(), (long long) offset, (long long) length);
     struct stat sb;
     int ret = fstat(fd, &sb);
     if (ret != 0) {
@@ -742,20 +773,19 @@
         return UNKNOWN_ERROR;
     }
 
-    ALOGV("st_dev  = %llu", static_cast<uint64_t>(sb.st_dev));
+    ALOGV("st_dev  = %llu", static_cast<unsigned long long>(sb.st_dev));
     ALOGV("st_mode = %u", sb.st_mode);
     ALOGV("st_uid  = %lu", static_cast<unsigned long>(sb.st_uid));
     ALOGV("st_gid  = %lu", static_cast<unsigned long>(sb.st_gid));
-    ALOGV("st_size = %llu", sb.st_size);
+    ALOGV("st_size = %llu", static_cast<unsigned long long>(sb.st_size));
 
     if (offset >= sb.st_size) {
         ALOGE("offset error");
-        ::close(fd);
         return UNKNOWN_ERROR;
     }
     if (offset + length > sb.st_size) {
         length = sb.st_size - offset;
-        ALOGV("calculated length = %lld", length);
+        ALOGV("calculated length = %lld", (long long)length);
     }
 
     player_type playerType = MediaPlayerFactory::getPlayerType(this,
@@ -1251,7 +1281,10 @@
             if (client->mAudioOutput != NULL)
                 client->mAudioOutput->switchToNextOutput();
             client->mNextClient->start();
-            client->mNextClient->mClient->notify(MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+            if (client->mNextClient->mClient != NULL) {
+                client->mNextClient->mClient->notify(
+                        MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+            }
         }
     }
 
@@ -1335,12 +1368,11 @@
 
 #undef LOG_TAG
 #define LOG_TAG "AudioSink"
-MediaPlayerService::AudioOutput::AudioOutput(int sessionId, int uid, int pid,
+MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, int uid, int pid,
         const audio_attributes_t* attr)
     : mCallback(NULL),
       mCallbackCookie(NULL),
       mCallbackData(NULL),
-      mBytesWritten(0),
       mStreamType(AUDIO_STREAM_MUSIC),
       mLeftVolume(1.0),
       mRightVolume(1.0),
@@ -1455,12 +1487,86 @@
     return mTrack->getTimestamp(ts);
 }
 
+// TODO: Remove unnecessary calls to getPlayedOutDurationUs()
+// as it acquires locks and may query the audio driver.
+//
+// Some calls could conceivably retrieve extrapolated data instead of
+// accessing getTimestamp() or getPosition() every time a data buffer with
+// a media time is received.
+//
+// Calculate duration of played samples if played at normal rate (i.e., 1.0).
+int64_t MediaPlayerService::AudioOutput::getPlayedOutDurationUs(int64_t nowUs) const
+{
+    Mutex::Autolock lock(mLock);
+    if (mTrack == 0 || mSampleRateHz == 0) {
+        return 0;
+    }
+
+    uint32_t numFramesPlayed;
+    int64_t numFramesPlayedAt;
+    AudioTimestamp ts;
+    static const int64_t kStaleTimestamp100ms = 100000;
+
+    status_t res = mTrack->getTimestamp(ts);
+    if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
+        numFramesPlayed = ts.mPosition;
+        numFramesPlayedAt = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
+        const int64_t timestampAge = nowUs - numFramesPlayedAt;
+        if (timestampAge > kStaleTimestamp100ms) {
+            // FIXME (audio):
+            // getTimestamp returns a timestamp which may come from audio mixing threads.
+            // After pausing, the MixerThread may go idle, thus the mTime estimate may
+            // become stale. Assuming that the MixerThread runs 20ms, with FastMixer at 5ms,
+            // the max latency should be about 25ms with an average around 12ms (to be verified).
+            // For safety we use 100ms.
+            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
+                    (long long)nowUs, (long long)numFramesPlayedAt);
+            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
+        }
+        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+    } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
+        numFramesPlayed = 0;
+        numFramesPlayedAt = nowUs;
+        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
+        //        numFramesPlayed, (long long)numFramesPlayedAt);
+    } else {                         // case 3: transitory at new track or audio fast tracks.
+        res = mTrack->getPosition(&numFramesPlayed);
+        CHECK_EQ(res, (status_t)OK);
+        numFramesPlayedAt = nowUs;
+        numFramesPlayedAt += 1000LL * mTrack->latency() / 2; /* XXX */
+        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+    }
+
+    // CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
+    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
+    int64_t durationUs = (int64_t)((int32_t)numFramesPlayed * 1000000LL / mSampleRateHz)
+            + nowUs - numFramesPlayedAt;
+    if (durationUs < 0) {
+        // Occurs when numFramesPlayed position is very small and the following:
+        // (1) In case 1, the time nowUs is computed before getTimestamp() is called and
+        //     numFramesPlayedAt is greater than nowUs by time more than numFramesPlayed.
+        // (2) In case 3, using getPosition and adding mAudioSink->latency() to
+        //     numFramesPlayedAt, by a time amount greater than numFramesPlayed.
+        //
+        // Both of these are transitory conditions.
+        ALOGV("getPlayedOutDurationUs: negative duration %lld set to zero", (long long)durationUs);
+        durationUs = 0;
+    }
+    ALOGV("getPlayedOutDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
+            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
+    return durationUs;
+}
+
 status_t MediaPlayerService::AudioOutput::getFramesWritten(uint32_t *frameswritten) const
 {
     Mutex::Autolock lock(mLock);
     if (mTrack == 0) return NO_INIT;
-    *frameswritten = mBytesWritten / mFrameSize;
-    return OK;
+    ExtendedTimestamp ets;
+    status_t status = mTrack->getTimestamp(&ets);
+    if (status == OK || status == WOULD_BLOCK) {
+        *frameswritten = (uint32_t)ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT];
+    }
+    return status;
 }
 
 status_t MediaPlayerService::AudioOutput::setParameters(const String8& keyValuePairs)
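
getPlayedOutDurationUs() above converts the last observed frame position to
microseconds at 1x and then extrapolates by the wall-clock time since that
position was sampled. A worked sketch of the arithmetic (same expression as the
code, including the int32_t cast flagged in the TODO; sample numbers are
illustrative):

    #include <cstdint>
    #include <cstdio>

    // frames-to-us at the content sample rate, plus the time elapsed since the
    // position was observed; the caller clamps negative results to zero.
    int64_t playedOutDurationUs(uint32_t numFramesPlayed, uint32_t sampleRateHz,
                                int64_t nowUs, int64_t numFramesPlayedAt) {
        return (int64_t)((int32_t)numFramesPlayed * 1000000LL / sampleRateHz)
                + nowUs - numFramesPlayedAt;
    }

    int main() {
        // 48000 frames at 48 kHz = 1 s, observed 20 ms ago: ~1.02 s played out.
        printf("%lld\n", (long long)playedOutDurationUs(
                48000, 48000, 1020000 /* nowUs */, 1000000 /* framesAt */));
        return 0;
    }
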
@@ -1511,7 +1617,19 @@
         }
 
         if ((mRecycledTrack->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
-            mRecycledTrack->flush();
+            int32_t msec = 0;
+            if (!mRecycledTrack->stopped()) { // check if active
+                (void)mRecycledTrack->pendingDuration(&msec);
+            }
+            mRecycledTrack->stop(); // ensure full data drain
+            ALOGD("deleting recycled track, waiting for data drain (%d msec)", msec);
+            if (msec > 0) {
+                static const int32_t WAIT_LIMIT_MS = 3000;
+                if (msec > WAIT_LIMIT_MS) {
+                    msec = WAIT_LIMIT_MS;
+                }
+                usleep(msec * 1000LL);
+            }
         }
         // An offloaded track isn't flushed because the STREAM_END is reported
         // slightly prematurely to allow time for the gapless track switch
@@ -1667,6 +1785,14 @@
                     mAttributes,
                     doNotReconnect);
         } else {
+            // TODO: Due to buffer memory concerns, size the track for the playback
+            // speed requested at open() time rather than always for kMaxRequiredSpeed,
+            // clamping the target into the range 1.0 <= targetSpeed <= kMaxRequiredSpeed.
+            const float targetSpeed =
+                    std::min(std::max(mPlaybackRate.mSpeed, 1.0f), kMaxRequiredSpeed);
+            ALOGW_IF(targetSpeed != mPlaybackRate.mSpeed,
+                    "track target speed:%f clamped from playback speed:%f",
+                    targetSpeed, mPlaybackRate.mSpeed);
             t = new AudioTrack(
                     mStreamType,
                     sampleRate,
@@ -1683,7 +1809,8 @@
                     mUid,
                     mPid,
                     mAttributes,
-                    doNotReconnect);
+                    doNotReconnect,
+                    targetSpeed);
         }
 
         if ((t == 0) || (t->initCheck() != NO_ERROR)) {
@@ -1703,7 +1830,7 @@
 
         if (!bothOffloaded) {
             if (mRecycledTrack->frameCount() != t->frameCount()) {
-                ALOGV("framecount differs: %u/%u frames",
+                ALOGV("framecount differs: %zu/%zu frames",
                       mRecycledTrack->frameCount(), t->frameCount());
                 reuse = false;
             }
@@ -1738,10 +1865,6 @@
     mFlags = flags;
     mMsecsPerFrame = 1E3f / (mPlaybackRate.mSpeed * sampleRate);
     mFrameSize = t->frameSize();
-    uint32_t pos;
-    if (t->getPosition(&pos) == OK) {
-        mBytesWritten = uint64_t(pos) * mFrameSize;
-    }
     mTrack = t;
 
     status_t res = NO_ERROR;
@@ -1835,7 +1958,6 @@
                 mNextOutput->mRecycledTrack = mTrack;
                 mNextOutput->mSampleRateHz = mSampleRateHz;
                 mNextOutput->mMsecsPerFrame = mMsecsPerFrame;
-                mNextOutput->mBytesWritten = mBytesWritten;
                 mNextOutput->mFlags = mFlags;
                 mNextOutput->mFrameSize = mFrameSize;
                 close_l();
@@ -1860,11 +1982,7 @@
 
     //ALOGV("write(%p, %u)", buffer, size);
     if (mTrack != 0) {
-        ssize_t ret = mTrack->write(buffer, size, blocking);
-        if (ret >= 0) {
-            mBytesWritten += ret;
-        }
-        return ret;
+        return mTrack->write(buffer, size, blocking);
     }
     return NO_INIT;
 }
@@ -1873,7 +1991,6 @@
 {
     ALOGV("stop");
     Mutex::Autolock lock(mLock);
-    mBytesWritten = 0;
     if (mTrack != 0) mTrack->stop();
 }
 
@@ -1881,7 +1998,6 @@
 {
     ALOGV("flush");
     Mutex::Autolock lock(mLock);
-    mBytesWritten = 0;
     if (mTrack != 0) mTrack->flush();
 }
 
@@ -2007,7 +2123,6 @@
 
         ALOGV_IF(actualSize == 0 && buffer->size > 0, "callbackwrapper: empty buffer returned");
 
-        me->mBytesWritten += actualSize;  // benign race with reader.
         buffer->size = actualSize;
         } break;
 
@@ -2034,7 +2149,7 @@
         //
         // The underrun event is sent once per track underrun; the condition is reset
         // when more data is sent to the AudioTrack.
-        ALOGI("callbackwrapper: EVENT_UNDERRUN (discarded)");
+        ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
         break;
 
     default:
@@ -2044,7 +2159,7 @@
     data->unlock();
 }
 
-int MediaPlayerService::AudioOutput::getSessionId() const
+audio_session_t MediaPlayerService::AudioOutput::getSessionId() const
 {
     Mutex::Autolock lock(mLock);
     return mSessionId;
@@ -2057,6 +2172,19 @@
     return mTrack->getSampleRate();
 }
 
+int64_t MediaPlayerService::AudioOutput::getBufferDurationInUs() const
+{
+    Mutex::Autolock lock(mLock);
+    if (mTrack == 0) {
+        return 0;
+    }
+    int64_t duration;
+    if (mTrack->getBufferDurationInUs(&duration) != OK) {
+        return 0;
+    }
+    return duration;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 struct CallbackThread : public Thread {
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 60d4617..01977f5 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -75,7 +75,7 @@
         class CallbackData;
 
      public:
-                                AudioOutput(int sessionId, int uid, int pid,
+                                AudioOutput(audio_session_t sessionId, int uid, int pid,
                                         const audio_attributes_t * attr);
         virtual                 ~AudioOutput();
 
@@ -88,9 +88,11 @@
         virtual float           msecsPerFrame() const;
         virtual status_t        getPosition(uint32_t *position) const;
         virtual status_t        getTimestamp(AudioTimestamp &ts) const;
+        virtual int64_t         getPlayedOutDurationUs(int64_t nowUs) const;
         virtual status_t        getFramesWritten(uint32_t *frameswritten) const;
-        virtual int             getSessionId() const;
+        virtual audio_session_t getSessionId() const;
         virtual uint32_t        getSampleRate() const;
+        virtual int64_t         getBufferDurationInUs() const;
 
         virtual status_t        open(
                 uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
@@ -140,7 +142,6 @@
         AudioCallback           mCallback;
         void *                  mCallbackCookie;
         CallbackData *          mCallbackData;
-        uint64_t                mBytesWritten;
         audio_stream_type_t     mStreamType;
         audio_attributes_t *    mAttributes;
         float                   mLeftVolume;
@@ -149,7 +150,7 @@
         uint32_t                mSampleRateHz; // sample rate of the content, as set in open()
         float                   mMsecsPerFrame;
         size_t                  mFrameSize;
-        int                     mSessionId;
+        audio_session_t         mSessionId;
         int                     mUid;
         int                     mPid;
         float                   mSendLevel;
@@ -213,12 +214,11 @@
     void    removeMediaRecorderClient(wp<MediaRecorderClient> client);
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever();
 
-    virtual sp<IMediaPlayer>    create(const sp<IMediaPlayerClient>& client, int audioSessionId);
+    virtual sp<IMediaPlayer>    create(const sp<IMediaPlayerClient>& client,
+                                       audio_session_t audioSessionId);
 
     virtual sp<IMediaCodecList> getCodecList() const;
     virtual sp<IOMX>            getOMX();
-    virtual sp<ICrypto>         makeCrypto();
-    virtual sp<IDrm>            makeDrm();
     virtual sp<IHDCP>           makeHDCP(bool createEncryptionModule);
 
     virtual sp<IRemoteDisplay> listenForRemoteDisplay(const String16 &opPackageName,
@@ -227,6 +227,14 @@
 
             void                removeClient(wp<Client> client);
 
+    enum {
+        MEDIASERVER_PROCESS_DEATH = 0,
+        MEDIAEXTRACTOR_PROCESS_DEATH = 1,
+        MEDIACODEC_PROCESS_DEATH = 2,
+        AUDIO_PROCESS_DEATH = 3,   // currently no need to track this
+        CAMERA_PROCESS_DEATH = 4
+    };
+
     // For battery usage tracking purpose
     struct BatteryUsageInfo {
         // how many streams are being played by one UID
@@ -331,15 +339,31 @@
                 pid_t           pid() const { return mPid; }
         virtual status_t        dump(int fd, const Vector<String16>& args);
 
-                int             getAudioSessionId() { return mAudioSessionId; }
+                audio_session_t getAudioSessionId() { return mAudioSessionId; }
 
     private:
+        class ServiceDeathNotifier: public IBinder::DeathRecipient
+        {
+        public:
+            ServiceDeathNotifier(
+                    const sp<IBinder>& service,
+                    const sp<MediaPlayerBase>& listener,
+                    int which);
+            virtual ~ServiceDeathNotifier();
+            virtual void binderDied(const wp<IBinder>& who);
+
+        private:
+            int mWhich;
+            sp<IBinder> mService;
+            wp<MediaPlayerBase> mListener;
+        };
+
         friend class MediaPlayerService;
                                 Client( const sp<MediaPlayerService>& service,
                                         pid_t pid,
                                         int32_t connId,
                                         const sp<IMediaPlayerClient>& client,
-                                        int audioSessionId,
+                                        audio_session_t audioSessionId,
                                         uid_t uid);
                                 Client();
         virtual                 ~Client();
@@ -374,7 +398,7 @@
                     status_t                    mStatus;
                     bool                        mLoop;
                     int32_t                     mConnId;
-                    int                         mAudioSessionId;
+                    audio_session_t             mAudioSessionId;
                     audio_attributes_t *        mAudioAttributes;
                     uid_t                       mUID;
                     sp<ANativeWindow>           mConnectedWindow;
@@ -393,6 +417,8 @@
         // getMetadata clears this set.
         media::Metadata::Filter mMetadataUpdated;  // protected by mLock
 
+        sp<IBinder::DeathRecipient> mExtractorDeathListener;
+        sp<IBinder::DeathRecipient> mCodecDeathListener;
 #if CALLBACK_ANTAGONIZER
                     Antagonizer*                mAntagonizer;
 #endif
@@ -408,7 +434,6 @@
                 SortedVector< wp<MediaRecorderClient> > mMediaRecorderClients;
                 int32_t                     mNextConnId;
                 sp<IOMX>                    mOMX;
-                sp<ICrypto>                 mCrypto;
 };
 
 // ----------------------------------------------------------------------------
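
The new process-death enum gives each watched media process a stable
identifier; ServiceDeathNotifier forwards it as the second extra of a
MEDIA_ERROR / MEDIA_ERROR_SERVER_DIED event. A sketch of a client-side handler
consuming it (the literal values mirror the enum above; recovery actions are
illustrative):

    #include <media/mediaplayer.h>

    // Sketch: route service-death notifications by the 'which' extra.
    void onNotify(int msg, int ext1, int ext2) {
        if (msg == android::MEDIA_ERROR && ext1 == android::MEDIA_ERROR_SERVER_DIED) {
            switch (ext2) {
                case 1:   // MEDIAEXTRACTOR_PROCESS_DEATH: re-open the source
                    break;
                case 2:   // MEDIACODEC_PROCESS_DEATH: recreate any codecs
                    break;
                default:  // other media processes
                    break;
            }
        }
    }
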
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index f761dec..d011d70 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -46,9 +46,6 @@
 const char* recordAudioPermission = "android.permission.RECORD_AUDIO";
 
 static bool checkPermission(const char* permissionString) {
-#ifndef HAVE_ANDROID_OS
-    return true;
-#endif
     if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
     bool ok = checkCallingPermission(String16(permissionString));
     if (!ok) ALOGE("Request requires %s", permissionString);
@@ -79,7 +76,7 @@
 
 
 
-status_t MediaRecorderClient::setCamera(const sp<ICamera>& camera,
+status_t MediaRecorderClient::setCamera(const sp<hardware::ICamera>& camera,
                                         const sp<ICameraRecordingProxy>& proxy)
 {
     ALOGV("setCamera");
@@ -166,7 +163,7 @@
 
 status_t MediaRecorderClient::setOutputFile(int fd, int64_t offset, int64_t length)
 {
-    ALOGV("setOutputFile(%d, %lld, %lld)", fd, offset, length);
+    ALOGV("setOutputFile(%d, %lld, %lld)", fd, (long long)offset, (long long)length);
     Mutex::Autolock lock(mLock);
     if (mRecorder == NULL) {
         ALOGE("recorder is not initialized");
@@ -253,6 +250,29 @@
     return mRecorder->stop();
 }
 
+status_t MediaRecorderClient::pause()
+{
+    ALOGV("pause");
+    Mutex::Autolock lock(mLock);
+    if (mRecorder == NULL) {
+        ALOGE("recorder is not initialized");
+        return NO_INIT;
+    }
+    return mRecorder->pause();
+
+}
+
+status_t MediaRecorderClient::resume()
+{
+    ALOGV("resume");
+    Mutex::Autolock lock(mLock);
+    if (mRecorder == NULL) {
+        ALOGE("recorder is not initialized");
+        return NO_INIT;
+    }
+    return mRecorder->resume();
+}
+
 status_t MediaRecorderClient::init()
 {
     ALOGV("init");
@@ -315,6 +335,28 @@
     release();
 }
 
+MediaRecorderClient::ServiceDeathNotifier::ServiceDeathNotifier(
+        const sp<IBinder>& service,
+        const sp<IMediaRecorderClient>& listener,
+        int which) {
+    mService = service;
+    mListener = listener;
+    mWhich = which;
+}
+
+MediaRecorderClient::ServiceDeathNotifier::~ServiceDeathNotifier() {
+    mService->unlinkToDeath(this);
+}
+
+void MediaRecorderClient::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
+    sp<IMediaRecorderClient> listener = mListener.promote();
+    if (listener != NULL) {
+        listener->notify(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
+    } else {
+        ALOGW("listener for process %d death is gone", mWhich);
+    }
+}
+
 status_t MediaRecorderClient::setListener(const sp<IMediaRecorderClient>& listener)
 {
     ALOGV("setListener");
@@ -323,7 +365,20 @@
         ALOGE("recorder is not initialized");
         return NO_INIT;
     }
-    return mRecorder->setListener(listener);
+    mRecorder->setListener(listener);
+
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.camera"));
+    mCameraDeathListener = new ServiceDeathNotifier(binder, listener,
+            MediaPlayerService::CAMERA_PROCESS_DEATH);
+    binder->linkToDeath(mCameraDeathListener);
+
+    binder = sm->getService(String16("media.codec"));
+    mCodecDeathListener = new ServiceDeathNotifier(binder, listener,
+            MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
+    binder->linkToDeath(mCodecDeathListener);
+
+    return OK;
 }
 
 status_t MediaRecorderClient::setClientName(const String16& clientName) {
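
setListener() now also registers ServiceDeathNotifier instances against the
media.camera and media.codec binders, so a recording client hears about either
process dying. The underlying binder pattern, sketched standalone (service name
and log tag are illustrative):

    // Sketch: the linkToDeath pattern used by ServiceDeathNotifier.
    #define LOG_TAG "DeathWatchSketch"
    #include <utils/Log.h>
    #include <utils/String16.h>
    #include <binder/IServiceManager.h>

    using namespace android;

    struct DeathLogger : public IBinder::DeathRecipient {
        virtual void binderDied(const wp<IBinder>& /*who*/) {
            ALOGW("watched service died");  // forward to listeners here
        }
    };

    // e.g. watchService("media.codec", new DeathLogger());
    void watchService(const char* name, const sp<IBinder::DeathRecipient>& recipient) {
        sp<IBinder> binder = defaultServiceManager()->getService(String16(name));
        if (binder != NULL) {
            binder->linkToDeath(recipient);  // binderDied() fires on process death
        }
    }
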
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 05130d4..eceb653 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -29,8 +29,24 @@
 
 class MediaRecorderClient : public BnMediaRecorder
 {
+    class ServiceDeathNotifier: public IBinder::DeathRecipient
+    {
+    public:
+        ServiceDeathNotifier(
+                const sp<IBinder>& service,
+                const sp<IMediaRecorderClient>& listener,
+                int which);
+        virtual ~ServiceDeathNotifier();
+        virtual void binderDied(const wp<IBinder>& who);
+
+    private:
+        int mWhich;
+        sp<IBinder> mService;
+        wp<IMediaRecorderClient> mListener;
+    };
+
 public:
-    virtual     status_t   setCamera(const sp<ICamera>& camera,
+    virtual     status_t   setCamera(const sp<hardware::ICamera>& camera,
                                     const sp<ICameraRecordingProxy>& proxy);
     virtual     status_t   setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
     virtual     status_t   setVideoSource(int vs);
@@ -51,6 +67,8 @@
     virtual     status_t   start();
     virtual     status_t   stop();
     virtual     status_t   reset();
+    virtual     status_t   pause();
+    virtual     status_t   resume();
     virtual     status_t   init();
     virtual     status_t   close();
     virtual     status_t   release();
@@ -67,6 +85,9 @@
                                                                const String16& opPackageName);
     virtual                ~MediaRecorderClient();
 
+    sp<IBinder::DeathRecipient> mCameraDeathListener;
+    sp<IBinder::DeathRecipient> mCodecDeathListener;
+
     pid_t                  mPid;
     Mutex                  mLock;
     MediaRecorderBase      *mRecorder;
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index f6acdf6..793f476 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -35,6 +35,7 @@
 #include <media/MediaMetadataRetrieverInterface.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/DataSource.h>
+#include <media/stagefright/Utils.h>
 #include <private/media/VideoFrame.h>
 #include "MetadataRetrieverClient.h"
 #include "StagefrightMetadataRetriever.h"
@@ -133,7 +134,8 @@
 
 status_t MetadataRetrieverClient::setDataSource(int fd, int64_t offset, int64_t length)
 {
-    ALOGV("setDataSource fd=%d, offset=%lld, length=%lld", fd, offset, length);
+    ALOGV("setDataSource fd=%d (%s), offset=%lld, length=%lld",
+            fd, nameForFd(fd).c_str(), (long long) offset, (long long) length);
     Mutex::Autolock lock(mLock);
     struct stat sb;
     int ret = fstat(fd, &sb);
@@ -141,20 +143,20 @@
         ALOGE("fstat(%d) failed: %d, %s", fd, ret, strerror(errno));
         return BAD_VALUE;
     }
-    ALOGV("st_dev  = %llu", static_cast<uint64_t>(sb.st_dev));
+    ALOGV("st_dev  = %llu", static_cast<unsigned long long>(sb.st_dev));
     ALOGV("st_mode = %u", sb.st_mode);
     ALOGV("st_uid  = %lu", static_cast<unsigned long>(sb.st_uid));
     ALOGV("st_gid  = %lu", static_cast<unsigned long>(sb.st_gid));
-    ALOGV("st_size = %llu", sb.st_size);
+    ALOGV("st_size = %llu", static_cast<unsigned long long>(sb.st_size));
 
     if (offset >= sb.st_size) {
-        ALOGE("offset (%lld) bigger than file size (%llu)", offset, sb.st_size);
-        ::close(fd);
+        ALOGE("offset (%lld) bigger than file size (%llu)",
+                (long long)offset, (unsigned long long)sb.st_size);
         return BAD_VALUE;
     }
     if (offset + length > sb.st_size) {
         length = sb.st_size - offset;
-        ALOGV("calculated length = %lld", length);
+        ALOGV("calculated length = %lld", (long long)length);
     }
 
     player_type playerType =
@@ -165,12 +167,10 @@
     ALOGV("player type = %d", playerType);
     sp<MediaMetadataRetrieverBase> p = createRetriever(playerType);
     if (p == NULL) {
-        ::close(fd);
         return NO_INIT;
     }
     status_t status = p->setDataSource(fd, offset, length);
     if (status == NO_ERROR) mRetriever = p;
-    ::close(fd);
     return status;
 }
 
@@ -195,7 +195,7 @@
 
 sp<IMemory> MetadataRetrieverClient::getFrameAtTime(int64_t timeUs, int option)
 {
-    ALOGV("getFrameAtTime: time(%lld us) option(%d)", timeUs, option);
+    ALOGV("getFrameAtTime: time(%lld us) option(%d)", (long long)timeUs, option);
     Mutex::Autolock lock(mLock);
     Mutex::Autolock glock(sLock);
     mThumbnail.clear();
@@ -217,7 +217,7 @@
     }
     mThumbnail = new MemoryBase(heap, 0, size);
     if (mThumbnail == NULL) {
-        ALOGE("not enough memory for VideoFrame size=%u", size);
+        ALOGE("not enough memory for VideoFrame size=%zu", size);
         delete frame;
         return NULL;
     }
@@ -259,7 +259,7 @@
     }
     mAlbumArt = new MemoryBase(heap, 0, size);
     if (mAlbumArt == NULL) {
-        ALOGE("not enough memory for MediaAlbumArt size=%u", size);
+        ALOGE("not enough memory for MediaAlbumArt size=%zu", size);
         delete albumArt;
         return NULL;
     }
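
Note the removed ::close(fd) calls in setDataSource() above: after this change
the caller retains ownership of the descriptor on every path, success or
failure. The offset/length validation is unchanged; as a worked example of the
clamp:

    #include <cstdint>
    #include <cassert>

    // Clamp (offset, length) against the file size, as in setDataSource().
    int64_t effectiveLength(int64_t offset, int64_t length, int64_t fileSize) {
        if (offset >= fileSize) return -1;  // rejected with BAD_VALUE
        if (offset + length > fileSize) length = fileSize - offset;
        return length;
    }

    int main() {
        assert(effectiveLength(400, 900, 1000) == 600);   // trimmed to EOF
        assert(effectiveLength(1000, 10, 1000) == -1);    // offset at/past EOF
        return 0;
    }
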
diff --git a/media/libmediaplayerservice/StagefrightPlayer.cpp b/media/libmediaplayerservice/StagefrightPlayer.cpp
deleted file mode 100644
index 3fedd9b..0000000
--- a/media/libmediaplayerservice/StagefrightPlayer.cpp
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "StagefrightPlayer"
-#include <utils/Log.h>
-
-#include "StagefrightPlayer.h"
-
-#include "AwesomePlayer.h"
-
-#include <media/Metadata.h>
-#include <media/stagefright/MediaExtractor.h>
-
-namespace android {
-
-StagefrightPlayer::StagefrightPlayer()
-    : mPlayer(new AwesomePlayer) {
-    ALOGV("StagefrightPlayer");
-
-    mPlayer->setListener(this);
-}
-
-StagefrightPlayer::~StagefrightPlayer() {
-    ALOGV("~StagefrightPlayer");
-    reset();
-
-    delete mPlayer;
-    mPlayer = NULL;
-}
-
-status_t StagefrightPlayer::initCheck() {
-    ALOGV("initCheck");
-    return OK;
-}
-
-status_t StagefrightPlayer::setUID(uid_t uid) {
-    mPlayer->setUID(uid);
-
-    return OK;
-}
-
-status_t StagefrightPlayer::setDataSource(
-        const sp<IMediaHTTPService> &httpService,
-        const char *url,
-        const KeyedVector<String8, String8> *headers) {
-    return mPlayer->setDataSource(httpService, url, headers);
-}
-
-// Warning: The filedescriptor passed into this method will only be valid until
-// the method returns, if you want to keep it, dup it!
-status_t StagefrightPlayer::setDataSource(int fd, int64_t offset, int64_t length) {
-    ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
-    return mPlayer->setDataSource(dup(fd), offset, length);
-}
-
-status_t StagefrightPlayer::setDataSource(const sp<IStreamSource> &source) {
-    return mPlayer->setDataSource(source);
-}
-
-status_t StagefrightPlayer::setVideoSurfaceTexture(
-        const sp<IGraphicBufferProducer> &bufferProducer) {
-    ALOGV("setVideoSurfaceTexture");
-
-    return mPlayer->setSurfaceTexture(bufferProducer);
-}
-
-status_t StagefrightPlayer::prepare() {
-    return mPlayer->prepare();
-}
-
-status_t StagefrightPlayer::prepareAsync() {
-    return mPlayer->prepareAsync();
-}
-
-status_t StagefrightPlayer::start() {
-    ALOGV("start");
-
-    return mPlayer->play();
-}
-
-status_t StagefrightPlayer::stop() {
-    ALOGV("stop");
-
-    return pause();  // what's the difference?
-}
-
-status_t StagefrightPlayer::pause() {
-    ALOGV("pause");
-
-    return mPlayer->pause();
-}
-
-bool StagefrightPlayer::isPlaying() {
-    ALOGV("isPlaying");
-    return mPlayer->isPlaying();
-}
-
-status_t StagefrightPlayer::seekTo(int msec) {
-    ALOGV("seekTo %.2f secs", msec / 1E3);
-
-    status_t err = mPlayer->seekTo((int64_t)msec * 1000);
-
-    return err;
-}
-
-status_t StagefrightPlayer::getCurrentPosition(int *msec) {
-    ALOGV("getCurrentPosition");
-
-    int64_t positionUs;
-    status_t err = mPlayer->getPosition(&positionUs);
-
-    if (err != OK) {
-        return err;
-    }
-
-    *msec = (positionUs + 500) / 1000;
-
-    return OK;
-}
-
-status_t StagefrightPlayer::getDuration(int *msec) {
-    ALOGV("getDuration");
-
-    int64_t durationUs;
-    status_t err = mPlayer->getDuration(&durationUs);
-
-    if (err != OK) {
-        *msec = 0;
-        return OK;
-    }
-
-    *msec = (durationUs + 500) / 1000;
-
-    return OK;
-}
-
-status_t StagefrightPlayer::reset() {
-    ALOGV("reset");
-
-    mPlayer->reset();
-
-    return OK;
-}
-
-status_t StagefrightPlayer::setLooping(int loop) {
-    ALOGV("setLooping");
-
-    return mPlayer->setLooping(loop);
-}
-
-player_type StagefrightPlayer::playerType() {
-    ALOGV("playerType");
-    return STAGEFRIGHT_PLAYER;
-}
-
-status_t StagefrightPlayer::invoke(const Parcel &request, Parcel *reply) {
-    ALOGV("invoke()");
-    return mPlayer->invoke(request, reply);
-}
-
-void StagefrightPlayer::setAudioSink(const sp<AudioSink> &audioSink) {
-    MediaPlayerInterface::setAudioSink(audioSink);
-
-    mPlayer->setAudioSink(audioSink);
-}
-
-status_t StagefrightPlayer::setParameter(int key, const Parcel &request) {
-    ALOGV("setParameter(key=%d)", key);
-    return mPlayer->setParameter(key, request);
-}
-
-status_t StagefrightPlayer::getParameter(int key, Parcel *reply) {
-    ALOGV("getParameter");
-    return mPlayer->getParameter(key, reply);
-}
-
-status_t StagefrightPlayer::setPlaybackSettings(const AudioPlaybackRate &rate) {
-    return mPlayer->setPlaybackSettings(rate);
-}
-
-status_t StagefrightPlayer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
-    return mPlayer->getPlaybackSettings(rate);
-}
-
-status_t StagefrightPlayer::getMetadata(
-        const media::Metadata::Filter& /* ids */, Parcel *records) {
-    using media::Metadata;
-
-    uint32_t flags = mPlayer->flags();
-
-    Metadata metadata(records);
-
-    metadata.appendBool(
-            Metadata::kPauseAvailable,
-            flags & MediaExtractor::CAN_PAUSE);
-
-    metadata.appendBool(
-            Metadata::kSeekBackwardAvailable,
-            flags & MediaExtractor::CAN_SEEK_BACKWARD);
-
-    metadata.appendBool(
-            Metadata::kSeekForwardAvailable,
-            flags & MediaExtractor::CAN_SEEK_FORWARD);
-
-    metadata.appendBool(
-            Metadata::kSeekAvailable,
-            flags & MediaExtractor::CAN_SEEK);
-
-    return OK;
-}
-
-status_t StagefrightPlayer::dump(int fd, const Vector<String16> &args) const {
-    return mPlayer->dump(fd, args);
-}
-
-}  // namespace android
diff --git a/media/libmediaplayerservice/StagefrightPlayer.h b/media/libmediaplayerservice/StagefrightPlayer.h
deleted file mode 100644
index 96013df..0000000
--- a/media/libmediaplayerservice/StagefrightPlayer.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
-**
-** Copyright 2009, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_STAGEFRIGHTPLAYER_H
-#define ANDROID_STAGEFRIGHTPLAYER_H
-
-#include <media/MediaPlayerInterface.h>
-
-namespace android {
-
-struct AwesomePlayer;
-
-class StagefrightPlayer : public MediaPlayerInterface {
-public:
-    StagefrightPlayer();
-    virtual ~StagefrightPlayer();
-
-    virtual status_t initCheck();
-
-    virtual status_t setUID(uid_t uid);
-
-    virtual status_t setDataSource(
-            const sp<IMediaHTTPService> &httpService,
-            const char *url,
-            const KeyedVector<String8, String8> *headers);
-
-    virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
-
-    virtual status_t setDataSource(const sp<IStreamSource> &source);
-
-    virtual status_t setVideoSurfaceTexture(
-            const sp<IGraphicBufferProducer> &bufferProducer);
-    virtual status_t prepare();
-    virtual status_t prepareAsync();
-    virtual status_t start();
-    virtual status_t stop();
-    virtual status_t pause();
-    virtual bool isPlaying();
-    virtual status_t seekTo(int msec);
-    virtual status_t getCurrentPosition(int *msec);
-    virtual status_t getDuration(int *msec);
-    virtual status_t reset();
-    virtual status_t setLooping(int loop);
-    virtual player_type playerType();
-    virtual status_t invoke(const Parcel &request, Parcel *reply);
-    virtual void setAudioSink(const sp<AudioSink> &audioSink);
-    virtual status_t setParameter(int key, const Parcel &request);
-    virtual status_t getParameter(int key, Parcel *reply);
-    virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate);
-    virtual status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
-
-    virtual status_t getMetadata(
-            const media::Metadata::Filter& ids, Parcel *records);
-
-    virtual status_t dump(int fd, const Vector<String16> &args) const;
-
-private:
-    AwesomePlayer *mPlayer;
-
-    StagefrightPlayer(const StagefrightPlayer &);
-    StagefrightPlayer &operator=(const StagefrightPlayer &);
-};
-
-}  // namespace android
-
-#endif  // ANDROID_STAGEFRIGHTPLAYER_H
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index e521fae..97ba76b 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -22,6 +22,8 @@
 #include "WebmWriter.h"
 #include "StagefrightRecorder.h"
 
+#include <android/hardware/ICamera.h>
+
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 
@@ -41,10 +43,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaCodecSource.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
 #include <media/MediaProfiles.h>
-#include <camera/ICamera.h>
 #include <camera/CameraParameters.h>
 
 #include <utils/Errors.h>
@@ -217,7 +216,7 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setCamera(const sp<ICamera> &camera,
+status_t StagefrightRecorder::setCamera(const sp<hardware::ICamera> &camera,
                                         const sp<ICameraRecordingProxy> &proxy) {
     ALOGV("setCamera");
     if (camera == 0) {
@@ -249,7 +248,7 @@
 }
 
 status_t StagefrightRecorder::setOutputFile(int fd, int64_t offset, int64_t length) {
-    ALOGV("setOutputFile: %d, %lld, %lld", fd, offset, length);
+    ALOGV("setOutputFile: %d, %lld, %lld", fd, (long long)offset, (long long)length);
     // These don't make any sense, do they?
     CHECK_EQ(offset, 0ll);
     CHECK_EQ(length, 0ll);
@@ -416,39 +415,40 @@
 }
 
 status_t StagefrightRecorder::setParamMaxFileDurationUs(int64_t timeUs) {
-    ALOGV("setParamMaxFileDurationUs: %lld us", timeUs);
+    ALOGV("setParamMaxFileDurationUs: %lld us", (long long)timeUs);
 
     // This is meant for backward compatibility for MediaRecorder.java
     if (timeUs <= 0) {
-        ALOGW("Max file duration is not positive: %lld us. Disabling duration limit.", timeUs);
+        ALOGW("Max file duration is not positive: %lld us. Disabling duration limit.",
+                (long long)timeUs);
         timeUs = 0; // Disable the duration limit for zero or negative values.
     } else if (timeUs <= 100000LL) {  // XXX: 100 milli-seconds
-        ALOGE("Max file duration is too short: %lld us", timeUs);
+        ALOGE("Max file duration is too short: %lld us", (long long)timeUs);
         return BAD_VALUE;
     }
 
     if (timeUs <= 15 * 1000000LL) {
-        ALOGW("Target duration (%lld us) too short to be respected", timeUs);
+        ALOGW("Target duration (%lld us) too short to be respected", (long long)timeUs);
     }
     mMaxFileDurationUs = timeUs;
     return OK;
 }
 
 status_t StagefrightRecorder::setParamMaxFileSizeBytes(int64_t bytes) {
-    ALOGV("setParamMaxFileSizeBytes: %lld bytes", bytes);
+    ALOGV("setParamMaxFileSizeBytes: %lld bytes", (long long)bytes);
 
     // This is meant for backward compatibility for MediaRecorder.java
     if (bytes <= 0) {
         ALOGW("Max file size is not positive: %lld bytes. "
-             "Disabling file size limit.", bytes);
+             "Disabling file size limit.", (long long)bytes);
         bytes = 0; // Disable the file size limit for zero or negative values.
     } else if (bytes <= 1024) {  // XXX: 1 kB
-        ALOGE("Max file size is too small: %lld bytes", bytes);
+        ALOGE("Max file size is too small: %lld bytes", (long long)bytes);
         return BAD_VALUE;
     }
 
     if (bytes <= 100 * 1024) {
-        ALOGW("Target file size (%lld bytes) is too small to be respected", bytes);
+        ALOGW("Target file size (%lld bytes) is too small to be respected", (long long)bytes);
     }
 
     mMaxFileSizeBytes = bytes;
@@ -500,9 +500,9 @@
 }
 
 status_t StagefrightRecorder::setParamTrackTimeStatus(int64_t timeDurationUs) {
-    ALOGV("setParamTrackTimeStatus: %lld", timeDurationUs);
+    ALOGV("setParamTrackTimeStatus: %lld", (long long)timeDurationUs);
     if (timeDurationUs < 20000) {  // Infeasible if shorter than 20 ms?
-        ALOGE("Tracking time duration too short: %lld us", timeDurationUs);
+        ALOGE("Tracking time duration too short: %lld us", (long long)timeDurationUs);
         return BAD_VALUE;
     }
     mTrackEveryTimeDurationUs = timeDurationUs;
@@ -585,7 +585,7 @@
 
     // Not allowing time more than a day
     if (timeUs <= 0 || timeUs > 86400*1E6) {
-        ALOGE("Time between frame capture (%lld) is out of range [0, 1 Day]", timeUs);
+        ALOGE("Time between frame capture (%lld) is out of range [0, 1 Day]", (long long)timeUs);
         return BAD_VALUE;
     }
 
@@ -782,8 +782,9 @@
         return INVALID_OPERATION;
     }
 
-    // Get UID here for permission checking
+    // Get UID and PID here for permission checking
     mClientUid = IPCThreadState::self()->getCallingUid();
+    mClientPid = IPCThreadState::self()->getCallingPid();
 
     status_t status = OK;
 
@@ -907,7 +908,7 @@
     return status;
 }
 
-sp<MediaSource> StagefrightRecorder::createAudioSource() {
+sp<MediaCodecSource> StagefrightRecorder::createAudioSource() {
     int32_t sourceSampleRate = mSampleRate;
 
     if (mCaptureFpsEnable && mCaptureFps >= mFrameRate) {
@@ -933,7 +934,9 @@
                 mOpPackageName,
                 sourceSampleRate,
                 mAudioChannels,
-                mSampleRate);
+                mSampleRate,
+                mClientUid,
+                mClientPid);
 
     status_t err = audioSource->initCheck();
 
@@ -982,7 +985,7 @@
     }
     format->setInt32("priority", 0 /* realtime */);
 
-    sp<MediaSource> audioEncoder =
+    sp<MediaCodecSource> audioEncoder =
             MediaCodecSource::Create(mLooper, format, audioSource);
     mAudioSourceNode = audioSource;
 
@@ -1041,13 +1044,14 @@
         return status;
     }
 
-    sp<MediaSource> audioEncoder = createAudioSource();
+    sp<MediaCodecSource> audioEncoder = createAudioSource();
     if (audioEncoder == NULL) {
         return UNKNOWN_ERROR;
     }
 
     CHECK(mWriter != 0);
     mWriter->addSource(audioEncoder);
+    mAudioEncoderSource = audioEncoder;
 
     if (mMaxFileDurationUs != 0) {
         mWriter->setMaxFileDuration(mMaxFileDurationUs);
@@ -1075,10 +1079,11 @@
         return BAD_VALUE;
     }
 
-    sp<MediaSource> source;
+    sp<MediaCodecSource> source;
 
     if (mAudioSource != AUDIO_SOURCE_CNT) {
         source = createAudioSource();
+        mAudioEncoderSource = source;
     } else {
         setDefaultVideoEncoderIfNecessary();
 
@@ -1092,6 +1097,7 @@
         if (err != OK) {
             return err;
         }
+        mVideoEncoderSource = source;
     }
 
     mWriter = new ARTPWriter(mOutputFd);
@@ -1132,7 +1138,7 @@
             return err;
         }
 
-        sp<MediaSource> encoder;
+        sp<MediaCodecSource> encoder;
         err = setupVideoEncoder(mediaSource, &encoder);
 
         if (err != OK) {
@@ -1140,6 +1146,7 @@
         }
 
         writer->addSource(encoder);
+        mVideoEncoderSource = encoder;
     }
 
     if (mMaxFileDurationUs != 0) {
@@ -1213,18 +1220,6 @@
 }
 
 status_t StagefrightRecorder::checkVideoEncoderCapabilities() {
-    /* hardware codecs must support camera source meta data mode */
-    Vector<CodecCapabilities> codecs;
-    OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
-    QueryCodecs(
-            client.interface(),
-            (mVideoEncoder == VIDEO_ENCODER_H263 ? MEDIA_MIMETYPE_VIDEO_H263 :
-             mVideoEncoder == VIDEO_ENCODER_MPEG_4_SP ? MEDIA_MIMETYPE_VIDEO_MPEG4 :
-             mVideoEncoder == VIDEO_ENCODER_VP8 ? MEDIA_MIMETYPE_VIDEO_VP8 :
-             mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC : ""),
-            false /* decoder */, true /* hwCodec */, &codecs);
-
     if (!mCaptureFpsEnable) {
         // Don't clip for time lapse capture, as the encoder will have enough
         // time to encode because of the slow capture rate of time lapse.
@@ -1441,18 +1436,18 @@
     if (mCaptureFpsEnable) {
         if (mTimeBetweenCaptureUs < 0) {
             ALOGE("Invalid mTimeBetweenCaptureUs value: %lld",
-                mTimeBetweenCaptureUs);
+                    (long long)mTimeBetweenCaptureUs);
             return BAD_VALUE;
         }
 
         mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
-                mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
+                mCamera, mCameraProxy, mCameraId, mClientName, mClientUid, mClientPid,
                 videoSize, mFrameRate, mPreviewSurface,
                 mTimeBetweenCaptureUs);
         *cameraSource = mCameraSourceTimeLapse;
     } else {
         *cameraSource = CameraSource::CreateFromCamera(
-                mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
+                mCamera, mCameraProxy, mCameraId, mClientName, mClientUid, mClientPid,
                 videoSize, mFrameRate,
                 mPreviewSurface);
     }
@@ -1481,15 +1476,15 @@
 
     CHECK(mFrameRate != -1);
 
-    mIsMetaDataStoredInVideoBuffers =
-        (*cameraSource)->isMetaDataStoredInVideoBuffers();
+    mMetaDataStoredInVideoBuffers =
+        (*cameraSource)->metaDataStoredInVideoBuffers();
 
     return OK;
 }
 
 status_t StagefrightRecorder::setupVideoEncoder(
         sp<MediaSource> cameraSource,
-        sp<MediaSource> *source) {
+        sp<MediaCodecSource> *source) {
     source->clear();
 
     sp<AMessage> format = new AMessage();
@@ -1511,6 +1506,10 @@
             format->setString("mime", MEDIA_MIMETYPE_VIDEO_VP8);
             break;
 
+        case VIDEO_ENCODER_HEVC:
+            format->setString("mime", MEDIA_MIMETYPE_VIDEO_HEVC);
+            break;
+
         default:
             CHECK(!"Should not be here, unsupported video encoding.");
             break;
@@ -1535,14 +1534,14 @@
         format->setInt32("width", mVideoWidth);
         format->setInt32("height", mVideoHeight);
         format->setInt32("stride", mVideoWidth);
-        format->setInt32("slice-height", mVideoWidth);
+        format->setInt32("slice-height", mVideoHeight);
         format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
 
         // set up time lapse/slow motion for surface source
         if (mCaptureFpsEnable) {
             if (mTimeBetweenCaptureUs <= 0) {
                 ALOGE("Invalid mTimeBetweenCaptureUs value: %lld",
-                        mTimeBetweenCaptureUs);
+                        (long long)mTimeBetweenCaptureUs);
                 return BAD_VALUE;
             }
             format->setInt64("time-lapse", mTimeBetweenCaptureUs);
@@ -1568,13 +1567,16 @@
         format->setFloat("operating-rate", mCaptureFps);
     }
 
-    uint32_t flags = 0;
-    if (mIsMetaDataStoredInVideoBuffers) {
-        flags |= MediaCodecSource::FLAG_USE_METADATA_INPUT;
+    if (mMetaDataStoredInVideoBuffers != kMetadataBufferTypeInvalid) {
+        format->setInt32("android._input-metadata-buffer-type", mMetaDataStoredInVideoBuffers);
     }
 
+    uint32_t flags = 0;
     if (cameraSource == NULL) {
         flags |= MediaCodecSource::FLAG_USE_SURFACE_INPUT;
+    } else {
+        // require dataspace setup even if not using surface input
+        format->setInt32("android._using-recorder", 1);
     }
 
     sp<MediaCodecSource> encoder = MediaCodecSource::Create(
@@ -1618,12 +1620,13 @@
             return UNKNOWN_ERROR;
     }
 
-    sp<MediaSource> audioEncoder = createAudioSource();
+    sp<MediaCodecSource> audioEncoder = createAudioSource();
     if (audioEncoder == NULL) {
         return UNKNOWN_ERROR;
     }
 
     writer->addSource(audioEncoder);
+    mAudioEncoderSource = audioEncoder;
     return OK;
 }
 
@@ -1649,13 +1652,14 @@
             return err;
         }
 
-        sp<MediaSource> encoder;
+        sp<MediaCodecSource> encoder;
         err = setupVideoEncoder(mediaSource, &encoder);
         if (err != OK) {
             return err;
         }
 
         writer->addSource(encoder);
+        mVideoEncoderSource = encoder;
         mTotalBitRate += mVideoBitRate;
     }
 
@@ -1726,25 +1730,53 @@
 
 status_t StagefrightRecorder::pause() {
     ALOGV("pause");
-    if (mWriter == NULL) {
-        return UNKNOWN_ERROR;
-    }
-    mWriter->pause();
-
-    if (mStarted) {
-        mStarted = false;
-
-        uint32_t params = 0;
-        if (mAudioSource != AUDIO_SOURCE_CNT) {
-            params |= IMediaPlayerService::kBatteryDataTrackAudio;
-        }
-        if (mVideoSource != VIDEO_SOURCE_LIST_END) {
-            params |= IMediaPlayerService::kBatteryDataTrackVideo;
-        }
-
-        addBatteryData(params);
+    if (!mStarted) {
+        return INVALID_OPERATION;
     }
 
+    // Already paused --- no-op.
+    if (mPauseStartTimeUs != 0) {
+        return OK;
+    }
+
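+    // Pause the encoder sources rather than the writer, so that resume() only
+    // needs to offset input timestamps and restart the sources.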
+    if (mAudioEncoderSource != NULL) {
+        mAudioEncoderSource->pause();
+    }
+    if (mVideoEncoderSource != NULL) {
+        mVideoEncoderSource->pause();
+    }
+
+    mPauseStartTimeUs = systemTime() / 1000;
+
+    return OK;
+}
+
+status_t StagefrightRecorder::resume() {
+    ALOGV("resume");
+    if (!mStarted) {
+        return INVALID_OPERATION;
+    }
+
+    // Not paused --- no-op.
+    if (mPauseStartTimeUs == 0) {
+        return OK;
+    }
+
+    // Leave a 30 ms gap so that timestamps after resume do not overlap with the
+    // last buffers delivered before the pause.
+    mTotalPausedDurationUs += (systemTime() / 1000) - mPauseStartTimeUs - 30000;
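+    // The (negative) offset shifts post-resume input timestamps back by the total
+    // paused duration, keeping the recorded timeline continuous; for time lapse or
+    // slow motion capture it is scaled by the capture-to-encode rate ratio.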
+    double timeOffset = -mTotalPausedDurationUs;
+    if (mCaptureFpsEnable) {
+        timeOffset *= mCaptureFps / mFrameRate;
+    }
+    if (mAudioEncoderSource != NULL) {
+        mAudioEncoderSource->setInputBufferTimeOffset((int64_t)timeOffset);
+        mAudioEncoderSource->start();
+    }
+    if (mVideoEncoderSource != NULL) {
+        mVideoEncoderSource->setInputBufferTimeOffset((int64_t)timeOffset);
+        mVideoEncoderSource->start();
+    }
+    mPauseStartTimeUs = 0;
 
     return OK;
 }
@@ -1762,9 +1794,13 @@
         err = mWriter->stop();
         mWriter.clear();
     }
+    mTotalPausedDurationUs = 0;
+    mPauseStartTimeUs = 0;
 
     mGraphicBufferProducer.clear();
     mPersistentSurface.clear();
+    mAudioEncoderSource.clear();
+    mVideoEncoderSource.clear();
 
     if (mOutputFd >= 0) {
         ::close(mOutputFd);
@@ -1832,7 +1868,7 @@
     mCaptureFps = 0.0f;
     mTimeBetweenCaptureUs = -1;
     mCameraSourceTimeLapse = NULL;
-    mIsMetaDataStoredInVideoBuffers = false;
+    mMetaDataStoredInVideoBuffers = kMetadataBufferTypeInvalid;
     mEncoderProfiles = MediaProfiles::getInstance();
     mRotationDegrees = 0;
     mLatitudex10000 = -3600000;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index da00bc7..d7f43bc 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -24,12 +24,15 @@
 
 #include <system/audio.h>
 
+#include <MetadataBufferType.h>
+
 namespace android {
 
 class Camera;
 class ICameraRecordingProxy;
 class CameraSource;
 class CameraSourceTimeLapse;
+struct MediaCodecSource;
 struct MediaSource;
 struct MediaWriter;
 class MetaData;
@@ -52,7 +55,7 @@
     virtual status_t setVideoEncoder(video_encoder ve);
     virtual status_t setVideoSize(int width, int height);
     virtual status_t setVideoFrameRate(int frames_per_second);
-    virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
+    virtual status_t setCamera(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
     virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
     virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface);
     virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
@@ -62,6 +65,7 @@
     virtual status_t prepare();
     virtual status_t start();
     virtual status_t pause();
+    virtual status_t resume();
     virtual status_t stop();
     virtual status_t close();
     virtual status_t reset();
@@ -71,13 +75,14 @@
     virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const;
 
 private:
-    sp<ICamera> mCamera;
+    sp<hardware::ICamera> mCamera;
     sp<ICameraRecordingProxy> mCameraProxy;
     sp<IGraphicBufferProducer> mPreviewSurface;
     sp<IGraphicBufferConsumer> mPersistentSurface;
     sp<IMediaRecorderClient> mListener;
     String16 mClientName;
     uid_t mClientUid;
+    pid_t mClientPid;
     sp<MediaWriter> mWriter;
     int mOutputFd;
     sp<AudioSource> mAudioSourceNode;
@@ -118,9 +123,14 @@
 
     String8 mParams;
 
-    bool mIsMetaDataStoredInVideoBuffers;
+    MetadataBufferType mMetaDataStoredInVideoBuffers;
     MediaProfiles *mEncoderProfiles;
 
+    int64_t mPauseStartTimeUs;
+    int64_t mTotalPausedDurationUs;
+    sp<MediaCodecSource> mAudioEncoderSource;
+    sp<MediaCodecSource> mVideoEncoderSource;
+
     bool mStarted;
     // Needed when GLFrames are encoded.
     // An <IGraphicBufferProducer> pointer
@@ -139,7 +149,7 @@
     status_t setupRawAudioRecording();
     status_t setupRTPRecording();
     status_t setupMPEG2TSRecording();
-    sp<MediaSource> createAudioSource();
+    sp<MediaCodecSource> createAudioSource();
     status_t checkVideoEncoderCapabilities();
     status_t checkAudioEncoderCapabilities();
     // Generic MediaSource set-up. Returns the appropriate
@@ -148,7 +158,7 @@
     status_t setupMediaSource(sp<MediaSource> *mediaSource);
     status_t setupCameraSource(sp<CameraSource> *cameraSource);
     status_t setupAudioEncoder(const sp<MediaWriter>& writer);
-    status_t setupVideoEncoder(sp<MediaSource> cameraSource, sp<MediaSource> *source);
+    status_t setupVideoEncoder(sp<MediaSource> cameraSource, sp<MediaCodecSource> *source);
 
     // Encoding parameter handling utilities
     status_t setParameter(const String8 &key, const String8 &value);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index e8c28d5..56042d4 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -42,6 +42,7 @@
 
 static int64_t kLowWaterMarkUs = 2000000ll;  // 2secs
 static int64_t kHighWaterMarkUs = 5000000ll;  // 5secs
+static int64_t kHighWaterMarkRebufferUs = 15000000ll;  // 15secs
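+// Once playback has started, rebuffering waits for a larger cushion before
+// resuming, to avoid rapid pause/resume cycles on marginal connections.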
 static const ssize_t kLowWaterMarkBytes = 40000;
 static const ssize_t kHighWaterMarkBytes = 200000;
 
@@ -66,11 +67,8 @@
       mFd(-1),
       mDrmManagerClient(NULL),
       mBitrate(-1ll),
-      mPollBufferingGeneration(0),
-      mPendingReadBufferTypes(0),
-      mBuffering(false),
-      mPrepareBuffering(false),
-      mPrevBufferPercentage(-1) {
+      mPendingReadBufferTypes(0) {
+    mBufferingMonitor = new BufferingMonitor(notify);
     resetDataSource();
     DataSource::RegisterDefaultSniffers();
 }
@@ -91,6 +89,13 @@
     mDrmManagerClient = NULL;
     mStarted = false;
     mStopRead = true;
+
+    if (mBufferingMonitorLooper != NULL) {
+        mBufferingMonitorLooper->unregisterHandler(mBufferingMonitor->id());
+        mBufferingMonitorLooper->stop();
+        mBufferingMonitorLooper = NULL;
+    }
+    mBufferingMonitor->stop();
 }
 
 status_t NuPlayer::GenericSource::setDataSource(
@@ -135,7 +140,7 @@
 }
 
 status_t NuPlayer::GenericSource::initFromDataSource() {
-    sp<MediaExtractor> extractor;
+    sp<IMediaExtractor> extractor;
     String8 mimeType;
     float confidence;
     sp<AMessage> dummy;
@@ -210,12 +215,16 @@
     }
 
     for (size_t i = 0; i < numtracks; ++i) {
-        sp<MediaSource> track = extractor->getTrack(i);
+        sp<IMediaSource> track = extractor->getTrack(i);
         if (track == NULL) {
             continue;
         }
 
         sp<MetaData> meta = extractor->getTrackMetaData(i);
+        if (meta == NULL) {
+            ALOGE("no metadata for track %zu", i);
+            return UNKNOWN_ERROR;
+        }
 
         const char *mime;
         CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -334,6 +343,10 @@
     return mIsStreaming;
 }
 
+void NuPlayer::GenericSource::setOffloadAudio(bool offload) {
+    mBufferingMonitor->setOffloadAudio(offload);
+}
+
 NuPlayer::GenericSource::~GenericSource() {
     if (mLooper != NULL) {
         mLooper->unregisterHandler(id());
@@ -462,10 +475,18 @@
     }
 
     if (mIsStreaming) {
-        mPrepareBuffering = true;
+        if (mBufferingMonitorLooper == NULL) {
+            mBufferingMonitor->prepare(mCachedSource, mWVMExtractor, mDurationUs, mBitrate,
+                    mIsStreaming);
 
-        ensureCacheIsFetching();
-        restartPollBuffering();
+            mBufferingMonitorLooper = new ALooper;
+            mBufferingMonitorLooper->setName("GSBMonitor");
+            mBufferingMonitorLooper->start();
+            mBufferingMonitorLooper->registerHandler(mBufferingMonitor);
+        }
+
+        mBufferingMonitor->ensureCacheIsFetching();
+        mBufferingMonitor->restartPollBuffering();
     } else {
         notifyPrepared();
     }
@@ -488,7 +509,7 @@
         }
         mBitrate = -1;
 
-        cancelPollBuffering();
+        mBufferingMonitor->cancelPollBuffering();
     }
     notifyPrepared(err);
 }
@@ -567,182 +588,6 @@
     return OK;
 }
 
-void NuPlayer::GenericSource::schedulePollBuffering() {
-    sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
-    msg->setInt32("generation", mPollBufferingGeneration);
-    msg->post(1000000ll);
-}
-
-void NuPlayer::GenericSource::cancelPollBuffering() {
-    mBuffering = false;
-    ++mPollBufferingGeneration;
-    mPrevBufferPercentage = -1;
-}
-
-void NuPlayer::GenericSource::restartPollBuffering() {
-    if (mIsStreaming) {
-        cancelPollBuffering();
-        onPollBuffering();
-    }
-}
-
-void NuPlayer::GenericSource::notifyBufferingUpdate(int32_t percentage) {
-    // Buffering percent could go backward as it's estimated from remaining
-    // data and last access time. This could cause the buffering position
-    // drawn on media control to jitter slightly. Remember previously reported
-    // percentage and don't allow it to go backward.
-    if (percentage < mPrevBufferPercentage) {
-        percentage = mPrevBufferPercentage;
-    } else if (percentage > 100) {
-        percentage = 100;
-    }
-
-    mPrevBufferPercentage = percentage;
-
-    ALOGV("notifyBufferingUpdate: buffering %d%%", percentage);
-
-    sp<AMessage> msg = dupNotify();
-    msg->setInt32("what", kWhatBufferingUpdate);
-    msg->setInt32("percentage", percentage);
-    msg->post();
-}
-
-void NuPlayer::GenericSource::startBufferingIfNecessary() {
-    ALOGV("startBufferingIfNecessary: mPrepareBuffering=%d, mBuffering=%d",
-            mPrepareBuffering, mBuffering);
-
-    if (mPrepareBuffering) {
-        return;
-    }
-
-    if (!mBuffering) {
-        mBuffering = true;
-
-        ensureCacheIsFetching();
-        sendCacheStats();
-
-        sp<AMessage> notify = dupNotify();
-        notify->setInt32("what", kWhatPauseOnBufferingStart);
-        notify->post();
-    }
-}
-
-void NuPlayer::GenericSource::stopBufferingIfNecessary() {
-    ALOGV("stopBufferingIfNecessary: mPrepareBuffering=%d, mBuffering=%d",
-            mPrepareBuffering, mBuffering);
-
-    if (mPrepareBuffering) {
-        mPrepareBuffering = false;
-        notifyPrepared();
-        return;
-    }
-
-    if (mBuffering) {
-        mBuffering = false;
-
-        sendCacheStats();
-
-        sp<AMessage> notify = dupNotify();
-        notify->setInt32("what", kWhatResumeOnBufferingEnd);
-        notify->post();
-    }
-}
-
-void NuPlayer::GenericSource::sendCacheStats() {
-    int32_t kbps = 0;
-    status_t err = UNKNOWN_ERROR;
-
-    if (mWVMExtractor != NULL) {
-        err = mWVMExtractor->getEstimatedBandwidthKbps(&kbps);
-    } else if (mCachedSource != NULL) {
-        err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
-    }
-
-    if (err == OK) {
-        sp<AMessage> notify = dupNotify();
-        notify->setInt32("what", kWhatCacheStats);
-        notify->setInt32("bandwidth", kbps);
-        notify->post();
-    }
-}
-
-void NuPlayer::GenericSource::ensureCacheIsFetching() {
-    if (mCachedSource != NULL) {
-        mCachedSource->resumeFetchingIfNecessary();
-    }
-}
-
-void NuPlayer::GenericSource::onPollBuffering() {
-    status_t finalStatus = UNKNOWN_ERROR;
-    int64_t cachedDurationUs = -1ll;
-    ssize_t cachedDataRemaining = -1;
-
-    ALOGW_IF(mWVMExtractor != NULL && mCachedSource != NULL,
-            "WVMExtractor and NuCachedSource both present");
-
-    if (mWVMExtractor != NULL) {
-        cachedDurationUs =
-                mWVMExtractor->getCachedDurationUs(&finalStatus);
-    } else if (mCachedSource != NULL) {
-        cachedDataRemaining =
-                mCachedSource->approxDataRemaining(&finalStatus);
-
-        if (finalStatus == OK) {
-            off64_t size;
-            int64_t bitrate = 0ll;
-            if (mDurationUs > 0 && mCachedSource->getSize(&size) == OK) {
-                bitrate = size * 8000000ll / mDurationUs;
-            } else if (mBitrate > 0) {
-                bitrate = mBitrate;
-            }
-            if (bitrate > 0) {
-                cachedDurationUs = cachedDataRemaining * 8000000ll / bitrate;
-            }
-        }
-    }
-
-    if (finalStatus != OK) {
-        ALOGV("onPollBuffering: EOS (finalStatus = %d)", finalStatus);
-
-        if (finalStatus == ERROR_END_OF_STREAM) {
-            notifyBufferingUpdate(100);
-        }
-
-        stopBufferingIfNecessary();
-        return;
-    } else if (cachedDurationUs >= 0ll) {
-        if (mDurationUs > 0ll) {
-            int64_t cachedPosUs = getLastReadPosition() + cachedDurationUs;
-            int percentage = 100.0 * cachedPosUs / mDurationUs;
-            if (percentage > 100) {
-                percentage = 100;
-            }
-
-            notifyBufferingUpdate(percentage);
-        }
-
-        ALOGV("onPollBuffering: cachedDurationUs %.1f sec",
-                cachedDurationUs / 1000000.0f);
-
-        if (cachedDurationUs < kLowWaterMarkUs) {
-            startBufferingIfNecessary();
-        } else if (cachedDurationUs > kHighWaterMarkUs) {
-            stopBufferingIfNecessary();
-        }
-    } else if (cachedDataRemaining >= 0) {
-        ALOGV("onPollBuffering: cachedDataRemaining %zd bytes",
-                cachedDataRemaining);
-
-        if (cachedDataRemaining < kLowWaterMarkBytes) {
-            startBufferingIfNecessary();
-        } else if (cachedDataRemaining > kHighWaterMarkBytes) {
-            stopBufferingIfNecessary();
-        }
-    }
-
-    schedulePollBuffering();
-}
-
 void NuPlayer::GenericSource::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
       case kWhatPrepareAsync:
@@ -771,6 +616,11 @@
           break;
       }
 
+      case kWhatSendGlobalTimedTextData:
+      {
+          sendGlobalTextData(kWhatTimedTextData, mFetchTimedTextDataGeneration, msg);
+          break;
+      }
       case kWhatSendTimedTextData:
       {
           sendTextData(kWhatTimedTextData, MEDIA_TRACK_TYPE_TIMEDTEXT,
@@ -782,7 +632,7 @@
       {
           int32_t trackIndex;
           CHECK(msg->findInt32("trackIndex", &trackIndex));
-          const sp<MediaSource> source = mSources.itemAt(trackIndex);
+          const sp<IMediaSource> source = mSources.itemAt(trackIndex);
 
           Track* track;
           const char *mime;
@@ -825,17 +675,7 @@
       case kWhatStart:
       case kWhatResume:
       {
-          restartPollBuffering();
-          break;
-      }
-
-      case kWhatPollBuffering:
-      {
-          int32_t generation;
-          CHECK(msg->findInt32("generation", &generation));
-          if (generation == mPollBufferingGeneration) {
-              onPollBuffering();
-          }
+          mBufferingMonitor->restartPollBuffering();
           break;
       }
 
@@ -965,16 +805,47 @@
     }
 }
 
+void NuPlayer::GenericSource::sendGlobalTextData(
+        uint32_t what,
+        int32_t curGen,
+        sp<AMessage> msg) {
+    int32_t msgGeneration;
+    CHECK(msg->findInt32("generation", &msgGeneration));
+    if (msgGeneration != curGen) {
+        // stale
+        return;
+    }
+
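+    // 3GPP timed text carries global setup data (style, layout) in the track
+    // format rather than in samples; deliver it once, tagged "global", before
+    // any per-sample text.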
+    uint32_t textType;
+    const void *data;
+    size_t size = 0;
+    if (mTimedTextTrack.mSource->getFormat()->findData(
+                    kKeyTextFormatData, &textType, &data, &size)) {
+        mGlobalTimedText = new ABuffer(size);
+        if (mGlobalTimedText->data()) {
+            memcpy(mGlobalTimedText->data(), data, size);
+            sp<AMessage> globalMeta = mGlobalTimedText->meta();
+            globalMeta->setInt64("timeUs", 0);
+            globalMeta->setString("mime", MEDIA_MIMETYPE_TEXT_3GPP);
+            globalMeta->setInt32("global", 1);
+            sp<AMessage> notify = dupNotify();
+            notify->setInt32("what", what);
+            notify->setBuffer("buffer", mGlobalTimedText);
+            notify->post();
+        }
+    }
+}
+
 sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
     sp<AMessage> msg = new AMessage(kWhatGetFormat, this);
     msg->setInt32("audio", audio);
 
     sp<AMessage> response;
-    void *format;
+    sp<RefBase> format;
     status_t err = msg->postAndAwaitResponse(&response);
     if (err == OK && response != NULL) {
-        CHECK(response->findPointer("format", &format));
-        return (MetaData *)format;
+        CHECK(response->findObject("format", &format));
+        return static_cast<MetaData*>(format.get());
     } else {
         return NULL;
     }
@@ -986,7 +857,7 @@
 
     sp<AMessage> response = new AMessage;
     sp<MetaData> format = doGetFormatMeta(audio);
-    response->setPointer("format", format.get());
+    response->setObject("format", format);
 
     sp<AReplyToken> replyID;
     CHECK(msg->senderAwaitsResponse(&replyID));
@@ -994,7 +865,7 @@
 }
 
 sp<MetaData> NuPlayer::GenericSource::doGetFormatMeta(bool audio) const {
-    sp<MediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
+    sp<IMediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
 
     if (source == NULL) {
         return NULL;
@@ -1005,6 +876,10 @@
 
 status_t NuPlayer::GenericSource::dequeueAccessUnit(
         bool audio, sp<ABuffer> *accessUnit) {
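+    // Before the source is started, report "would block" for audio so the
+    // caller retries later instead of reading from an unstarted track.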
+    if (audio && !mStarted) {
+        return -EWOULDBLOCK;
+    }
+
     Track *track = audio ? &mAudioTrack : &mVideoTrack;
 
     if (track->mSource == NULL) {
@@ -1051,6 +926,7 @@
     CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
     if (audio) {
         mAudioLastDequeueTimeUs = timeUs;
+        mBufferingMonitor->updateDequeuedBufferTime(timeUs);
     } else {
         mVideoLastDequeueTimeUs = timeUs;
     }
@@ -1091,6 +967,10 @@
 
     sp<AMessage> format = new AMessage();
     sp<MetaData> meta = mSources.itemAt(trackIndex)->getFormat();
+    if (meta == NULL) {
+        ALOGE("no metadata for track %zu", trackIndex);
+        return NULL;
+    }
 
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -1238,7 +1118,7 @@
         return OK;
     }
 
-    const sp<MediaSource> source = mSources.itemAt(trackIndex);
+    const sp<IMediaSource> source = mSources.itemAt(trackIndex);
     sp<MetaData> meta = source->getFormat();
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -1277,6 +1157,10 @@
             msg->post();
         }
 
+        sp<AMessage> msg2 = new AMessage(kWhatSendGlobalTimedTextData, this);
+        msg2->setInt32("generation", mFetchTimedTextDataGeneration);
+        msg2->post();
+
         if (mTimedTextTrack.mSource != NULL
                 && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
             sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, this);
@@ -1329,6 +1213,8 @@
 }
 
 status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs) {
+    mBufferingMonitor->updateDequeuedBufferTime(-1ll);
+
     // If the Widevine source is stopped, do not attempt to read any
     // more buffers.
     if (mStopRead) {
@@ -1355,8 +1241,8 @@
     // If currently buffering, post kWhatBufferingEnd first, so that
     // NuPlayer resumes. Otherwise, if cache hits high watermark
     // before new polling happens, no one will resume the playback.
-    stopBufferingIfNecessary();
-    restartPollBuffering();
+    mBufferingMonitor->stopBufferingIfNecessary();
+    mBufferingMonitor->restartPollBuffering();
 
     return OK;
 }
@@ -1436,6 +1322,14 @@
         meta->setBuffer("sei", sei);
     }
 
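+    // Pass through any MPEG user data attached to this media buffer (commonly
+    // used to carry closed captions) as buffer metadata.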
+    const void *mpegUserDataPointer;
+    size_t mpegUserDataLength;
+    if (mb->meta_data()->findData(
+            kKeyMpegUserData, &dataType, &mpegUserDataPointer, &mpegUserDataLength)) {
+        sp<ABuffer> mpegUserData = ABuffer::CreateAsCopy(mpegUserDataPointer, mpegUserDataLength);
+        meta->setBuffer("mpegUserData", mpegUserData);
+    }
+
     if (actualTimeUs) {
         *actualTimeUs = timeUs;
     }
@@ -1526,30 +1420,60 @@
         options.setNonBlocking();
     }
 
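+    // Batch audio reads where possible: with extractors now accessed over the
+    // IMediaSource binder interface, readMultiple() amortizes IPC overhead
+    // across many small audio buffers. Widevine sources keep single reads.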
+    bool couldReadMultiple = (!mIsWidevine && trackType == MEDIA_TRACK_TYPE_AUDIO);
     for (size_t numBuffers = 0; numBuffers < maxBuffers; ) {
-        MediaBuffer *mbuf;
-        status_t err = track->mSource->read(&mbuf, &options);
+        Vector<MediaBuffer *> mediaBuffers;
+        status_t err = NO_ERROR;
+
+        if (!seeking && couldReadMultiple) {
+            err = track->mSource->readMultiple(&mediaBuffers, (maxBuffers - numBuffers));
+        } else {
+            MediaBuffer *mbuf = NULL;
+            err = track->mSource->read(&mbuf, &options);
+            if (err == OK && mbuf != NULL) {
+                mediaBuffers.push_back(mbuf);
+            }
+        }
 
         options.clearSeekTo();
 
-        if (err == OK) {
+        size_t id = 0;
+        size_t count = mediaBuffers.size();
+        for (; id < count; ++id) {
             int64_t timeUs;
-            CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+            MediaBuffer *mbuf = mediaBuffers[id];
+            if (!mbuf->meta_data()->findInt64(kKeyTime, &timeUs)) {
+                mbuf->meta_data()->dumpToLog();
+                track->mPackets->signalEOS(ERROR_MALFORMED);
+                break;
+            }
             if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
                 mAudioTimeUs = timeUs;
+                mBufferingMonitor->updateQueuedTime(true /* isAudio */, timeUs);
             } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
                 mVideoTimeUs = timeUs;
+                mBufferingMonitor->updateQueuedTime(false /* isAudio */, timeUs);
             }
 
             queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
 
             sp<ABuffer> buffer = mediaBufferToABuffer(
-                    mbuf, trackType, seekTimeUs, actualTimeUs);
+                    mbuf, trackType, seekTimeUs,
+                    numBuffers == 0 ? actualTimeUs : NULL);
             track->mPackets->queueAccessUnit(buffer);
             formatChange = false;
             seeking = false;
             ++numBuffers;
-        } else if (err == WOULD_BLOCK) {
+        }
+        if (id < count) {
+            // Error: some MediaBuffer is missing kKeyTime; release the remaining buffers.
+            for (; id < count; ++id) {
+                mediaBuffers[id]->release();
+            }
+            break;
+        }
+
+        if (err == WOULD_BLOCK) {
             break;
         } else if (err == INFO_FORMAT_CHANGED) {
 #if 0
@@ -1558,7 +1482,7 @@
                     NULL,
                     false /* discard */);
 #endif
-        } else {
+        } else if (err != OK) {
             queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
             track->mPackets->signalEOS(err);
             break;
@@ -1581,4 +1505,329 @@
     }
 }
 
+NuPlayer::GenericSource::BufferingMonitor::BufferingMonitor(const sp<AMessage> &notify)
+    : mNotify(notify),
+      mDurationUs(-1ll),
+      mBitrate(-1ll),
+      mIsStreaming(false),
+      mAudioTimeUs(0),
+      mVideoTimeUs(0),
+      mPollBufferingGeneration(0),
+      mPrepareBuffering(false),
+      mBuffering(false),
+      mPrevBufferPercentage(-1),
+      mOffloadAudio(false),
+      mFirstDequeuedBufferRealUs(-1ll),
+      mFirstDequeuedBufferMediaUs(-1ll),
+      mLastDequeuedBufferMediaUs(-1ll) {
+}
+
+NuPlayer::GenericSource::BufferingMonitor::~BufferingMonitor() {
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::prepare(
+        const sp<NuCachedSource2> &cachedSource,
+        const sp<WVMExtractor> &wvmExtractor,
+        int64_t durationUs,
+        int64_t bitrate,
+        bool isStreaming) {
+    Mutex::Autolock _l(mLock);
+    prepare_l(cachedSource, wvmExtractor, durationUs, bitrate, isStreaming);
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::stop() {
+    Mutex::Autolock _l(mLock);
+    prepare_l(NULL /* cachedSource */, NULL /* wvmExtractor */, -1 /* durationUs */,
+            -1 /* bitrate */, false /* isStreaming */);
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::cancelPollBuffering() {
+    Mutex::Autolock _l(mLock);
+    cancelPollBuffering_l();
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::restartPollBuffering() {
+    Mutex::Autolock _l(mLock);
+    if (mIsStreaming) {
+        cancelPollBuffering_l();
+        onPollBuffering_l();
+    }
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::stopBufferingIfNecessary() {
+    Mutex::Autolock _l(mLock);
+    stopBufferingIfNecessary_l();
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::ensureCacheIsFetching() {
+    Mutex::Autolock _l(mLock);
+    ensureCacheIsFetching_l();
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::updateQueuedTime(bool isAudio, int64_t timeUs) {
+    Mutex::Autolock _l(mLock);
+    if (isAudio) {
+        mAudioTimeUs = timeUs;
+    } else {
+        mVideoTimeUs = timeUs;
+    }
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::setOffloadAudio(bool offload) {
+    Mutex::Autolock _l(mLock);
+    mOffloadAudio = offload;
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::updateDequeuedBufferTime(int64_t mediaUs) {
+    Mutex::Autolock _l(mLock);
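+    // A negative media time resets the baseline (e.g. on seek); the first
+    // positive update anchors real time against media time for the downstream
+    // cache estimate in onPollBuffering_l().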
+    if (mediaUs < 0) {
+        mFirstDequeuedBufferRealUs = -1ll;
+        mFirstDequeuedBufferMediaUs = -1ll;
+    } else if (mFirstDequeuedBufferRealUs < 0) {
+        mFirstDequeuedBufferRealUs = ALooper::GetNowUs();
+        mFirstDequeuedBufferMediaUs = mediaUs;
+    }
+    mLastDequeuedBufferMediaUs = mediaUs;
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::prepare_l(
+        const sp<NuCachedSource2> &cachedSource,
+        const sp<WVMExtractor> &wvmExtractor,
+        int64_t durationUs,
+        int64_t bitrate,
+        bool isStreaming) {
+    ALOGW_IF(wvmExtractor != NULL && cachedSource != NULL,
+            "WVMExtractor and NuCachedSource are both present when "
+            "BufferingMonitor::prepare_l is called; ignoring NuCachedSource");
+
+    mCachedSource = cachedSource;
+    mWVMExtractor = wvmExtractor;
+    mDurationUs = durationUs;
+    mBitrate = bitrate;
+    mIsStreaming = isStreaming;
+    mAudioTimeUs = 0;
+    mVideoTimeUs = 0;
+    mPrepareBuffering = (cachedSource != NULL || wvmExtractor != NULL);
+    cancelPollBuffering_l();
+    mOffloadAudio = false;
+    mFirstDequeuedBufferRealUs = -1ll;
+    mFirstDequeuedBufferMediaUs = -1ll;
+    mLastDequeuedBufferMediaUs = -1ll;
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::cancelPollBuffering_l() {
+    mBuffering = false;
+    ++mPollBufferingGeneration;
+    mPrevBufferPercentage = -1;
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::notifyBufferingUpdate_l(int32_t percentage) {
+    // Buffering percent could go backward as it's estimated from remaining
+    // data and last access time. This could cause the buffering position
+    // drawn on media control to jitter slightly. Remember previously reported
+    // percentage and don't allow it to go backward.
+    if (percentage < mPrevBufferPercentage) {
+        percentage = mPrevBufferPercentage;
+    } else if (percentage > 100) {
+        percentage = 100;
+    }
+
+    mPrevBufferPercentage = percentage;
+
+    ALOGV("notifyBufferingUpdate_l: buffering %d%%", percentage);
+
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", kWhatBufferingUpdate);
+    msg->setInt32("percentage", percentage);
+    msg->post();
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::startBufferingIfNecessary_l() {
+    if (mPrepareBuffering) {
+        return;
+    }
+
+    if (!mBuffering) {
+        ALOGD("startBufferingIfNecessary_l");
+
+        mBuffering = true;
+
+        ensureCacheIsFetching_l();
+        sendCacheStats_l();
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatPauseOnBufferingStart);
+        notify->post();
+    }
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::stopBufferingIfNecessary_l() {
+    if (mPrepareBuffering) {
+        ALOGD("stopBufferingIfNecessary_l, mBuffering=%d", mBuffering);
+
+        mPrepareBuffering = false;
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatPrepared);
+        notify->setInt32("err", OK);
+        notify->post();
+
+        return;
+    }
+
+    if (mBuffering) {
+        ALOGD("stopBufferingIfNecessary_l");
+        mBuffering = false;
+
+        sendCacheStats_l();
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatResumeOnBufferingEnd);
+        notify->post();
+    }
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::sendCacheStats_l() {
+    int32_t kbps = 0;
+    status_t err = UNKNOWN_ERROR;
+
+    if (mWVMExtractor != NULL) {
+        err = mWVMExtractor->getEstimatedBandwidthKbps(&kbps);
+    } else if (mCachedSource != NULL) {
+        err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
+    }
+
+    if (err == OK) {
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatCacheStats);
+        notify->setInt32("bandwidth", kbps);
+        notify->post();
+    }
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::ensureCacheIsFetching_l() {
+    if (mCachedSource != NULL) {
+        mCachedSource->resumeFetchingIfNecessary();
+    }
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::schedulePollBuffering_l() {
+    sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
+    msg->setInt32("generation", mPollBufferingGeneration);
+    // Poll the buffering status once every second.
+    msg->post(1000000ll);
+}
+
+int64_t NuPlayer::GenericSource::BufferingMonitor::getLastReadPosition_l() {
+    if (mAudioTimeUs > 0) {
+        return mAudioTimeUs;
+    } else if (mVideoTimeUs > 0) {
+        return mVideoTimeUs;
+    } else {
+        return 0;
+    }
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::onPollBuffering_l() {
+    status_t finalStatus = UNKNOWN_ERROR;
+    int64_t cachedDurationUs = -1ll;
+    ssize_t cachedDataRemaining = -1;
+
+    if (mWVMExtractor != NULL) {
+        cachedDurationUs =
+                mWVMExtractor->getCachedDurationUs(&finalStatus);
+    } else if (mCachedSource != NULL) {
+        cachedDataRemaining =
+                mCachedSource->approxDataRemaining(&finalStatus);
+
+        if (finalStatus == OK) {
+            off64_t size;
+            int64_t bitrate = 0ll;
+            if (mDurationUs > 0 && mCachedSource->getSize(&size) == OK) {
+                // |bitrate| is in bits per second, while |size| is in bytes.
+                bitrate = size * 8000000ll / mDurationUs;
+            } else if (mBitrate > 0) {
+                bitrate = mBitrate;
+            }
+            if (bitrate > 0) {
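+                // cachedDataRemaining [bytes] * 8000000 / bitrate [bits/s]
+                // yields the remaining cached duration in microseconds.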
+                cachedDurationUs = cachedDataRemaining * 8000000ll / bitrate;
+            }
+        }
+    }
+
+    if (finalStatus != OK) {
+        ALOGV("onPollBuffering_l: EOS (finalStatus = %d)", finalStatus);
+
+        if (finalStatus == ERROR_END_OF_STREAM) {
+            notifyBufferingUpdate_l(100);
+        }
+
+        stopBufferingIfNecessary_l();
+        return;
+    } else if (cachedDurationUs >= 0ll) {
+        if (mDurationUs > 0ll) {
+            int64_t cachedPosUs = getLastReadPosition_l() + cachedDurationUs;
+            int percentage = 100.0 * cachedPosUs / mDurationUs;
+            if (percentage > 100) {
+                percentage = 100;
+            }
+
+            notifyBufferingUpdate_l(percentage);
+        }
+
+        ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec",
+                cachedDurationUs / 1000000.0f);
+
+        if (cachedDurationUs < kLowWaterMarkUs) {
+            // Take the data cached in downstream components into account to
+            // avoid an unnecessary pause.
+            if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
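+                // The span of media time dequeued so far, minus the wall-clock
+                // time elapsed, approximates the data still buffered downstream.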
+                int64_t downStreamCacheUs = mLastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
+                        - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
+                if (downStreamCacheUs > 0) {
+                    cachedDurationUs += downStreamCacheUs;
+                }
+            }
+
+            if (cachedDurationUs < kLowWaterMarkUs) {
+                startBufferingIfNecessary_l();
+            }
+        } else {
+            int64_t highWaterMark = mPrepareBuffering ? kHighWaterMarkUs : kHighWaterMarkRebufferUs;
+            if (cachedDurationUs > highWaterMark) {
+                stopBufferingIfNecessary_l();
+            }
+        }
+    } else if (cachedDataRemaining >= 0) {
+        ALOGV("onPollBuffering_l: cachedDataRemaining %zd bytes",
+                cachedDataRemaining);
+
+        if (cachedDataRemaining < kLowWaterMarkBytes) {
+            startBufferingIfNecessary_l();
+        } else if (cachedDataRemaining > kHighWaterMarkBytes) {
+            stopBufferingIfNecessary_l();
+        }
+    }
+
+    schedulePollBuffering_l();
+}
+
+void NuPlayer::GenericSource::BufferingMonitor::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+      case kWhatPollBuffering:
+      {
+          int32_t generation;
+          CHECK(msg->findInt32("generation", &generation));
+          Mutex::Autolock _l(mLock);
+          if (generation == mPollBufferingGeneration) {
+              onPollBuffering_l();
+          }
+          break;
+      }
+      default:
+          TRESPASS();
+          break;
+    }
+}
+
 }  // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index ac980ef..2fd703e 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -77,6 +77,8 @@
 
     virtual bool isStreaming() const;
 
+    virtual void setOffloadAudio(bool offload);
+
 protected:
     virtual ~GenericSource();
 
@@ -90,6 +92,7 @@
         kWhatFetchSubtitleData,
         kWhatFetchTimedTextData,
         kWhatSendSubtitleData,
+        kWhatSendGlobalTimedTextData,
         kWhatSendTimedTextData,
         kWhatChangeAVSource,
         kWhatPollBuffering,
@@ -106,11 +109,88 @@
 
     struct Track {
         size_t mIndex;
-        sp<MediaSource> mSource;
+        sp<IMediaSource> mSource;
         sp<AnotherPacketSource> mPackets;
     };
 
-    Vector<sp<MediaSource> > mSources;
+    // Helper to monitor buffering status. The polling happens every second.
+    // When necessary, it will send out buffering events to the player.
+    struct BufferingMonitor : public AHandler {
+    public:
+        BufferingMonitor(const sp<AMessage> &notify);
+
+        // Set up state.
+        void prepare(const sp<NuCachedSource2> &cachedSource,
+                const sp<WVMExtractor> &wvmExtractor,
+                int64_t durationUs,
+                int64_t bitrate,
+                bool isStreaming);
+        // Stop and reset buffering monitor.
+        void stop();
+        // Cancel the current monitor task.
+        void cancelPollBuffering();
+        // Restart the monitor task.
+        void restartPollBuffering();
+        // Stop buffering task and send out corresponding events.
+        void stopBufferingIfNecessary();
+        // Make sure data source is getting data.
+        void ensureCacheIsFetching();
+        // Update the media time of the buffer just extracted from the data source.
+        void updateQueuedTime(bool isAudio, int64_t timeUs);
+
+        // Set the offload mode.
+        void setOffloadAudio(bool offload);
+        // Update the media time of the last dequeued buffer sent to the decoder.
+        void updateDequeuedBufferTime(int64_t mediaUs);
+
+    protected:
+        virtual ~BufferingMonitor();
+        virtual void onMessageReceived(const sp<AMessage> &msg);
+
+    private:
+        enum {
+            kWhatPollBuffering,
+        };
+
+        sp<AMessage> mNotify;
+
+        sp<NuCachedSource2> mCachedSource;
+        sp<WVMExtractor> mWVMExtractor;
+        int64_t mDurationUs;
+        int64_t mBitrate;
+        bool mIsStreaming;
+
+        int64_t mAudioTimeUs;
+        int64_t mVideoTimeUs;
+        int32_t mPollBufferingGeneration;
+        bool mPrepareBuffering;
+        bool mBuffering;
+        int32_t mPrevBufferPercentage;
+
+        mutable Mutex mLock;
+
+        bool mOffloadAudio;
+        int64_t mFirstDequeuedBufferRealUs;
+        int64_t mFirstDequeuedBufferMediaUs;
+        int64_t mLastDequeuedBufferMediaUs;
+
+        void prepare_l(const sp<NuCachedSource2> &cachedSource,
+                const sp<WVMExtractor> &wvmExtractor,
+                int64_t durationUs,
+                int64_t bitrate,
+                bool isStreaming);
+        void cancelPollBuffering_l();
+        void notifyBufferingUpdate_l(int32_t percentage);
+        void startBufferingIfNecessary_l();
+        void stopBufferingIfNecessary_l();
+        void sendCacheStats_l();
+        void ensureCacheIsFetching_l();
+        int64_t getLastReadPosition_l();
+        void onPollBuffering_l();
+        void schedulePollBuffering_l();
+    };
+
+    Vector<sp<IMediaSource> > mSources;
     Track mAudioTrack;
     int64_t mAudioTimeUs;
     int64_t mAudioLastDequeueTimeUs;
@@ -146,16 +226,15 @@
     bool mStarted;
     bool mStopRead;
     int64_t mBitrate;
-    int32_t mPollBufferingGeneration;
+    sp<BufferingMonitor> mBufferingMonitor;
     uint32_t mPendingReadBufferTypes;
-    bool mBuffering;
-    bool mPrepareBuffering;
-    int32_t mPrevBufferPercentage;
+    sp<ABuffer> mGlobalTimedText;
 
     mutable Mutex mReadBufferLock;
     mutable Mutex mDisconnectLock;
 
     sp<ALooper> mLooper;
+    sp<ALooper> mBufferingMonitorLooper;
 
     void resetDataSource();
 
@@ -187,6 +266,10 @@
             uint32_t what, media_track_type type,
             int32_t curGen, sp<AnotherPacketSource> packets, sp<AMessage> msg);
 
+    void sendGlobalTextData(
+            uint32_t what,
+            int32_t curGen, sp<AMessage> msg);
+
     void sendTextData(
             uint32_t what, media_track_type type,
             int32_t curGen, sp<AnotherPacketSource> packets, sp<AMessage> msg);
@@ -206,16 +289,6 @@
     void queueDiscontinuityIfNeeded(
             bool seeking, bool formatChange, media_track_type trackType, Track *track);
 
-    void schedulePollBuffering();
-    void cancelPollBuffering();
-    void restartPollBuffering();
-    void onPollBuffering();
-    void notifyBufferingUpdate(int32_t percentage);
-    void startBufferingIfNecessary();
-    void stopBufferingIfNecessary();
-    void sendCacheStats();
-    void ensureCacheIsFetching();
-
     DISALLOW_EVIL_CONSTRUCTORS(GenericSource);
 };
 
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 126625a..3fffdc1a 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -101,15 +101,20 @@
 }
 
 sp<AMessage> NuPlayer::HTTPLiveSource::getFormat(bool audio) {
-    if (mLiveSession == NULL) {
-        return NULL;
+    sp<AMessage> format;
+    status_t err = -EWOULDBLOCK;
+    if (mLiveSession != NULL) {
+        err = mLiveSession->getStreamFormat(
+                audio ? LiveSession::STREAMTYPE_AUDIO
+                      : LiveSession::STREAMTYPE_VIDEO,
+                &format);
     }
 
-    sp<AMessage> format;
-    status_t err = mLiveSession->getStreamFormat(
-            audio ? LiveSession::STREAMTYPE_AUDIO
-                  : LiveSession::STREAMTYPE_VIDEO,
-            &format);
+    if (err == -EWOULDBLOCK) {
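+        // The stream format isn't available yet; return a marker message with
+        // an "err" field so the caller can retry instead of failing hard.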
+        format = new AMessage();
+        format->setInt32("err", err);
+        return format;
+    }
 
     if (err != OK) {
         return NULL;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 26532d7..0b10ae4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -188,9 +188,11 @@
       mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
       mVideoFpsHint(-1.f),
       mStarted(false),
+      mPrepared(false),
+      mResetting(false),
       mSourceStarted(false),
       mPaused(false),
-      mPausedByClient(false),
+      mPausedByClient(true),
       mPausedForBuffering(false) {
     clearFlushComplete();
 }
@@ -398,14 +400,20 @@
 }
 
 void NuPlayer::resetAsync() {
-    if (mSource != NULL) {
+    sp<Source> source;
+    {
+        Mutex::Autolock autoLock(mSourceLock);
+        source = mSource;
+    }
+
+    if (source != NULL) {
         // During a reset, the data source might already be unresponsive, so we
         // need to disconnect explicitly for reads to exit promptly.
         // We can't queue the disconnect request to the looper, as it might be
         // queued behind a stuck read and never get processed.
         // Doing the disconnect outside the looper allows the pending reads to exit
         // (either successfully or with error).
-        mSource->disconnect();
+        source->disconnect();
     }
 
     (new AMessage(kWhatReset, this))->post();
@@ -421,8 +429,15 @@
 
 void NuPlayer::writeTrackInfo(
         Parcel* reply, const sp<AMessage> format) const {
+    if (format == NULL) {
+        ALOGE("NULL format");
+        return;
+    }
     int32_t trackType;
-    CHECK(format->findInt32("type", &trackType));
+    if (!format->findInt32("type", &trackType)) {
+        ALOGE("no track type");
+        return;
+    }
 
     AString mime;
     if (!format->findString("mime", &mime)) {
@@ -435,12 +450,16 @@
         } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
             mime = "video/";
         } else {
-            TRESPASS();
+            ALOGE("unknown track type: %d", trackType);
+            return;
         }
     }
 
     AString lang;
-    CHECK(format->findString("language", &lang));
+    if (!format->findString("language", &lang)) {
+        ALOGE("no language");
+        return;
+    }
 
     reply->writeInt32(2); // write something non-zero
     reply->writeInt32(trackType);
@@ -471,6 +490,7 @@
             sp<RefBase> obj;
             CHECK(msg->findObject("source", &obj));
             if (obj != NULL) {
+                Mutex::Autolock autoLock(mSourceLock);
                 mSource = static_cast<Source *>(obj.get());
             } else {
                 err = UNKNOWN_ERROR;
@@ -714,19 +734,52 @@
             readFromAMessage(msg, &rate);
             status_t err = OK;
             if (mRenderer != NULL) {
+                // AudioSink allows only 1.f and 0.f for offload mode.
+                // For other speeds, switch to non-offload mode.
+                if (mOffloadAudio && ((rate.mSpeed != 0.f && rate.mSpeed != 1.f)
+                        || rate.mPitch != 1.f)) {
+                    int64_t currentPositionUs;
+                    if (getCurrentPosition(&currentPositionUs) != OK) {
+                        currentPositionUs = mPreviousSeekTimeUs;
+                    }
+
+                    // Set mPlaybackSettings so that the new audio decoder can
+                    // be created correctly.
+                    mPlaybackSettings = rate;
+                    if (!mPaused) {
+                        mRenderer->pause();
+                    }
+                    restartAudio(
+                            currentPositionUs, true /* forceNonOffload */,
+                            true /* needsToCreateAudioDecoder */);
+                    if (!mPaused) {
+                        mRenderer->resume();
+                    }
+                }
+
                 err = mRenderer->setPlaybackSettings(rate);
             }
             if (err == OK) {
                 if (rate.mSpeed == 0.f) {
                     onPause();
+                    mPausedByClient = true;
                     // save all other settings (using non-paused speed)
                     // so we can restore them on start
                     AudioPlaybackRate newRate = rate;
                     newRate.mSpeed = mPlaybackSettings.mSpeed;
                     mPlaybackSettings = newRate;
                 } else { /* rate.mSpeed != 0.f */
-                    onResume();
                     mPlaybackSettings = rate;
+                    if (mStarted) {
+                        // do not resume yet if the source is still buffering
+                        if (!mPausedForBuffering) {
+                            onResume();
+                        }
+                    } else if (mPrepared) {
+                        onStart();
+                    }
+
+                    mPausedByClient = false;
                 }
             }
 
@@ -833,16 +886,21 @@
 
             bool mHadAnySourcesBefore =
                 (mAudioDecoder != NULL) || (mVideoDecoder != NULL);
+            bool rescan = false;
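+            // instantiateDecoder() returns -EWOULDBLOCK when a track's format
+            // isn't available yet; in that case, schedule another scan below.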
 
             // initialize video before audio because successful initialization of
             // video may change deep buffer mode of audio.
             if (mSurface != NULL) {
-                instantiateDecoder(false, &mVideoDecoder);
+                if (instantiateDecoder(false, &mVideoDecoder) == -EWOULDBLOCK) {
+                    rescan = true;
+                }
             }
 
             // Don't try to re-open audio sink if there's an existing decoder.
             if (mAudioSink != NULL && mAudioDecoder == NULL) {
-                instantiateDecoder(true, &mAudioDecoder);
+                if (instantiateDecoder(true, &mAudioDecoder) == -EWOULDBLOCK) {
+                    rescan = true;
+                }
             }
 
             if (!mHadAnySourcesBefore
@@ -869,8 +927,7 @@
                 break;
             }
 
-            if ((mAudioDecoder == NULL && mAudioSink != NULL)
-                    || (mVideoDecoder == NULL && mSurface != NULL)) {
+            if (rescan) {
                 msg->post(100000ll);
                 mScanSourcesPending = true;
             }
@@ -1098,40 +1155,19 @@
                 int32_t reason;
                 CHECK(msg->findInt32("reason", &reason));
                 ALOGV("Tear down audio with reason %d.", reason);
-                mAudioDecoder.clear();
-                ++mAudioDecoderGeneration;
-                bool needsToCreateAudioDecoder = true;
-                if (mFlushingAudio == FLUSHING_DECODER) {
-                    mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
-                    mFlushingAudio = FLUSHED;
-                    finishFlushIfPossible();
-                } else if (mFlushingAudio == FLUSHING_DECODER_SHUTDOWN
-                        || mFlushingAudio == SHUTTING_DOWN_DECODER) {
-                    mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
-                    mFlushingAudio = SHUT_DOWN;
-                    finishFlushIfPossible();
-                    needsToCreateAudioDecoder = false;
-                }
-                if (mRenderer == NULL) {
+                if (reason == Renderer::kDueToTimeout && !(mPaused && mOffloadAudio)) {
+                    // TimeoutWhenPaused is only for offload mode.
+                    ALOGW("Received a stale message for teardown.");
                     break;
                 }
-                closeAudioSink();
-                mRenderer->flush(
-                        true /* audio */, false /* notifyComplete */);
-                if (mVideoDecoder != NULL) {
-                    mRenderer->flush(
-                            false /* audio */, false /* notifyComplete */);
-                }
-
                 int64_t positionUs;
                 if (!msg->findInt64("positionUs", &positionUs)) {
                     positionUs = mPreviousSeekTimeUs;
                 }
-                performSeek(positionUs);
 
-                if (reason == Renderer::kDueToError && needsToCreateAudioDecoder) {
-                    instantiateDecoder(true /* audio */, &mAudioDecoder);
-                }
+                restartAudio(
+                        positionUs, reason == Renderer::kForceNonOffload /* forceNonOffload */,
+                        reason != Renderer::kDueToTimeout /* needsToCreateAudioDecoder */);
             }
             break;
         }
@@ -1145,6 +1181,8 @@
         {
             ALOGV("kWhatReset");
 
+            mResetting = true;
+
             mDeferredActions.push_back(
                     new FlushDecoderAction(
                         FLUSH_CMD_SHUTDOWN /* audio */,
@@ -1227,7 +1265,8 @@
 }
 
 void NuPlayer::onResume() {
-    if (!mPaused) {
+    if (!mPaused || mResetting) {
+        ALOGD_IF(mResetting, "resetting, onResume discarded");
         return;
     }
     mPaused = false;
@@ -1293,6 +1332,7 @@
     mAudioEOS = false;
     mVideoEOS = false;
     mStarted = true;
+    mPaused = false;
 
     uint32_t flags = 0;
 
@@ -1301,6 +1341,7 @@
     }
 
     sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+    ALOGV_IF(audioMeta == NULL, "no metadata for audio source");  // video only stream
     audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
     if (mAudioSink != NULL) {
         streamType = mAudioSink->getAudioStreamType();
@@ -1309,7 +1350,8 @@
     sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
 
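+    // Offloaded audio cannot change speed or pitch, so only offload when
+    // playing at the default rate.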
     mOffloadAudio =
-        canOffloadStream(audioMeta, (videoFormat != NULL), mSource->isStreaming(), streamType);
+        canOffloadStream(audioMeta, (videoFormat != NULL), mSource->isStreaming(), streamType)
+                && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
     if (mOffloadAudio) {
         flags |= Renderer::FLAG_OFFLOAD_AUDIO;
     }
@@ -1441,7 +1483,8 @@
     mScanSourcesPending = true;
 }
 
-void NuPlayer::tryOpenAudioSinkForOffload(const sp<AMessage> &format, bool hasVideo) {
+void NuPlayer::tryOpenAudioSinkForOffload(
+        const sp<AMessage> &format, const sp<MetaData> &audioMeta, bool hasVideo) {
     // Note: This is called early in NuPlayer to determine whether offloading
     // is possible; otherwise the decoders call the renderer openAudioSink directly.
 
@@ -1451,8 +1494,6 @@
         // Any failure we turn off mOffloadAudio.
         mOffloadAudio = false;
     } else if (mOffloadAudio) {
-        sp<MetaData> audioMeta =
-                mSource->getFormatMeta(true /* audio */);
         sendMetaDataToHal(mAudioSink, audioMeta);
     }
 }
@@ -1461,7 +1502,45 @@
     mRenderer->closeAudioSink();
 }
 
-void NuPlayer::determineAudioModeChange() {
+void NuPlayer::restartAudio(
+        int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
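+    // Tears down the current audio decoder and sink, flushes the renderer, seeks both
+    // streams back to the current position, and optionally recreates the audio decoder
+    // (forcing non-offload first when requested).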
+    if (mAudioDecoder != NULL) {
+        mAudioDecoder->pause();
+        mAudioDecoder.clear();
+        ++mAudioDecoderGeneration;
+    }
+    if (mFlushingAudio == FLUSHING_DECODER) {
+        mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
+        mFlushingAudio = FLUSHED;
+        finishFlushIfPossible();
+    } else if (mFlushingAudio == FLUSHING_DECODER_SHUTDOWN
+            || mFlushingAudio == SHUTTING_DOWN_DECODER) {
+        mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
+        mFlushingAudio = SHUT_DOWN;
+        finishFlushIfPossible();
+        needsToCreateAudioDecoder = false;
+    }
+    if (mRenderer == NULL) {
+        return;
+    }
+    closeAudioSink();
+    mRenderer->flush(true /* audio */, false /* notifyComplete */);
+    if (mVideoDecoder != NULL) {
+        mRenderer->flush(false /* audio */, false /* notifyComplete */);
+    }
+
+    performSeek(currentPositionUs);
+
+    if (forceNonOffload) {
+        mRenderer->signalDisableOffloadAudio();
+        mOffloadAudio = false;
+    }
+    if (needsToCreateAudioDecoder) {
+        instantiateDecoder(true /* audio */, &mAudioDecoder, !forceNonOffload);
+    }
+}
+
+void NuPlayer::determineAudioModeChange(const sp<AMessage> &audioFormat) {
     if (mSource == NULL || mAudioSink == NULL) {
         return;
     }
@@ -1477,14 +1556,14 @@
     audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
     const bool hasVideo = (videoFormat != NULL);
     const bool canOffload = canOffloadStream(
-            audioMeta, hasVideo, mSource->isStreaming(), streamType);
+            audioMeta, hasVideo, mSource->isStreaming(), streamType)
+                    && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
     if (canOffload) {
         if (!mOffloadAudio) {
             mRenderer->signalEnableOffloadAudio();
         }
         // open audio sink early under offload mode.
-        sp<AMessage> format = mSource->getFormat(true /*audio*/);
-        tryOpenAudioSinkForOffload(format, hasVideo);
+        tryOpenAudioSinkForOffload(audioFormat, audioMeta, hasVideo);
     } else {
         if (mOffloadAudio) {
             mRenderer->signalDisableOffloadAudio();
@@ -1493,7 +1572,8 @@
     }
 }
 
-status_t NuPlayer::instantiateDecoder(bool audio, sp<DecoderBase> *decoder) {
+status_t NuPlayer::instantiateDecoder(
+        bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange) {
     // The audio decoder could be cleared by tear down. If still in shut down
     // process, no need to create a new audio decoder.
     if (*decoder != NULL || (audio && mFlushingAudio == SHUT_DOWN)) {
@@ -1503,7 +1583,12 @@
     sp<AMessage> format = mSource->getFormat(audio);
 
     if (format == NULL) {
-        return -EWOULDBLOCK;
+        return UNKNOWN_ERROR;
+    } else {
+        status_t err;
+        if (format->findInt32("err", &err) && err) {
+            return err;
+        }
     }
 
     format->setInt32("priority", 0 /* realtime */);
@@ -1536,12 +1621,18 @@
         ++mAudioDecoderGeneration;
         notify->setInt32("generation", mAudioDecoderGeneration);
 
-        determineAudioModeChange();
+        if (checkAudioModeChange) {
+            determineAudioModeChange(format);
+        }
         if (mOffloadAudio) {
+            mSource->setOffloadAudio(true /* offload */);
+
             const bool hasVideo = (mSource->getFormat(false /*audio */) != NULL);
             format->setInt32("has-video", hasVideo);
             *decoder = new DecoderPassThrough(notify, mSource, mRenderer);
         } else {
+            mSource->setOffloadAudio(false /* offload */);
+
             *decoder = new Decoder(notify, mSource, mPID, mRenderer);
         }
     } else {
@@ -1919,6 +2010,7 @@
     if (mSource != NULL) {
         mSource->stop();
 
+        Mutex::Autolock autoLock(mSourceLock);
         mSource.clear();
     }
 
@@ -1930,6 +2022,8 @@
     }
 
     mStarted = false;
+    mPrepared = false;
+    mResetting = false;
     mSourceStarted = false;
 }
 
@@ -2041,6 +2135,8 @@
                         new FlushDecoderAction(FLUSH_CMD_SHUTDOWN /* audio */,
                                                FLUSH_CMD_SHUTDOWN /* video */));
                 processDeferredActions();
+            } else {
+                mPrepared = true;
             }
 
             sp<NuPlayerDriver> driver = mDriver.promote();
@@ -2111,11 +2207,6 @@
                 mPausedForBuffering = true;
                 onPause();
             }
-            // fall-thru
-        }
-
-        case Source::kWhatBufferingStart:
-        {
             notifyListener(MEDIA_INFO, MEDIA_INFO_BUFFERING_START, 0);
             break;
         }
@@ -2133,11 +2224,6 @@
                     onResume();
                 }
             }
-            // fall-thru
-        }
-
-        case Source::kWhatBufferingEnd:
-        {
             notifyListener(MEDIA_INFO, MEDIA_INFO_BUFFERING_END, 0);
             break;
         }
@@ -2190,7 +2276,7 @@
             int posMs;
             int64_t timeUs, posUs;
             driver->getCurrentPosition(&posMs);
-            posUs = posMs * 1000;
+            posUs = (int64_t) posMs * 1000ll;
             CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
             if (posUs < timeUs) {
@@ -2296,7 +2382,7 @@
     const void *data;
     size_t size = 0;
     int64_t timeUs;
-    int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS;
+    int32_t flag = TextDescriptions::IN_BAND_TEXT_3GPP;
 
     AString mime;
     CHECK(buffer->meta()->findString("mime", &mime));
@@ -2308,7 +2394,12 @@
     Parcel parcel;
     if (size > 0) {
         CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-        flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
+        int32_t global = 0;
+        if (buffer->meta()->findInt32("global", &global) && global) {
+            flag |= TextDescriptions::GLOBAL_DESCRIPTIONS;
+        } else {
+            flag |= TextDescriptions::LOCAL_DESCRIPTIONS;
+        }
         TextDescriptions::getParcelOfDescriptions(
                 (const uint8_t *)data, size, flag, timeUs / 1000, &parcel);
     }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index c9f0bbd..ae17c76 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -140,6 +140,7 @@
     bool mUIDValid;
     uid_t mUID;
     pid_t mPID;
+    Mutex mSourceLock;  // guard |mSource|.
     sp<Source> mSource;
     uint32_t mSourceFlags;
     sp<Surface> mSurface;
@@ -197,6 +198,8 @@
     AVSyncSettings mSyncSettings;
     float mVideoFpsHint;
     bool mStarted;
+    bool mPrepared;
+    bool mResetting;
     bool mSourceStarted;
 
     // Actual pause state, either as requested by client or due to buffering.
@@ -221,11 +224,15 @@
         mFlushComplete[1][1] = false;
     }
 
-    void tryOpenAudioSinkForOffload(const sp<AMessage> &format, bool hasVideo);
+    void tryOpenAudioSinkForOffload(
+            const sp<AMessage> &format, const sp<MetaData> &audioMeta, bool hasVideo);
     void closeAudioSink();
-    void determineAudioModeChange();
+    void restartAudio(
+            int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder);
+    void determineAudioModeChange(const sp<AMessage> &audioFormat);
 
-    status_t instantiateDecoder(bool audio, sp<DecoderBase> *decoder);
+    status_t instantiateDecoder(
+            bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange = true);
 
     status_t onInstantiateSecureDecoders();
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index ac3c6b6..13716cf 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -30,6 +30,9 @@
 
 namespace android {
 
+// In CEA-708B, the maximum bandwidth of CC is 9600bps.
+static const size_t kMaxBandwidthSizeBytes = 9600 / 8;
+
 struct CCData {
     CCData(uint8_t type, uint8_t data1, uint8_t data2)
         : mType(type), mData1(data1), mData2(data2) {
@@ -116,15 +119,19 @@
 
 NuPlayer::CCDecoder::CCDecoder(const sp<AMessage> &notify)
     : mNotify(notify),
-      mCurrentChannel(0),
-      mSelectedTrack(-1) {
-      for (size_t i = 0; i < sizeof(mTrackIndices)/sizeof(mTrackIndices[0]); ++i) {
-          mTrackIndices[i] = -1;
-      }
+      mSelectedTrack(-1),
+      mDTVCCPacket(new ABuffer(kMaxBandwidthSizeBytes)) {
+    mDTVCCPacket->setRange(0, 0);
+
+    // In CEA-608, packets with cc_type 0 carry CC1 and CC2, and packets with cc_type 1
+    // carry CC3 and CC4. The following array tracks the channel currently transmitting
+    // on each cc_type.
+    mLine21Channels[0] = 0; // CC1
+    mLine21Channels[1] = 2; // CC3
 }
 
 size_t NuPlayer::CCDecoder::getTrackCount() const {
-    return mFoundChannels.size();
+    return mTracks.size();
 }
 
 sp<AMessage> NuPlayer::CCDecoder::getTrackInfo(size_t index) const {
@@ -134,13 +141,31 @@
 
     sp<AMessage> format = new AMessage();
 
+    CCTrack track = mTracks[index];
+
     format->setInt32("type", MEDIA_TRACK_TYPE_SUBTITLE);
     format->setString("language", "und");
-    format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_608);
-    //CC1, field 0 channel 0
-    bool isDefaultAuto = (mFoundChannels[index] == 0);
+
+    switch (track.mTrackType) {
+        case kTrackTypeCEA608:
+            format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_608);
+            break;
+        case kTrackTypeCEA708:
+            format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_708);
+            break;
+        default:
+            ALOGE("Unknown track type: %d", track.mTrackType);
+            return NULL;
+    }
+
+    // For CEA-608 CC1, field 0 channel 0
+    bool isDefaultAuto = track.mTrackType == kTrackTypeCEA608
+            && track.mTrackChannel == 0;
+    // For CEA-708, Primary Caption Service.
+    bool isDefaultOnly = track.mTrackType == kTrackTypeCEA708
+            && track.mTrackChannel == 1;
     format->setInt32("auto", isDefaultAuto);
-    format->setInt32("default", isDefaultAuto);
+    format->setInt32("default", isDefaultAuto || isDefaultOnly);
     format->setInt32("forced", 0);
 
     return format;
@@ -167,24 +192,20 @@
         mSelectedTrack = -1;
     }
 
+    // Clear the previous track payloads
+    mCCMap.clear();
+
     return OK;
 }
 
 bool NuPlayer::CCDecoder::isSelected() const {
-    return mSelectedTrack >= 0 && mSelectedTrack < (int32_t) getTrackCount();
+    return mSelectedTrack >= 0 && mSelectedTrack < (int32_t)getTrackCount();
 }
 
 bool NuPlayer::CCDecoder::isTrackValid(size_t index) const {
     return index < getTrackCount();
 }
 
-int32_t NuPlayer::CCDecoder::getTrackIndex(size_t channel) const {
-    if (channel < sizeof(mTrackIndices)/sizeof(mTrackIndices[0])) {
-        return mTrackIndices[channel];
-    }
-    return -1;
-}
-
 // returns true if a new CC track is found
 bool NuPlayer::CCDecoder::extractFromSEI(const sp<ABuffer> &accessUnit) {
     sp<ABuffer> sei;
@@ -197,7 +218,7 @@
 
     bool trackAdded = false;
 
-    const NALPosition *nal = (NALPosition *) sei->data();
+    const NALPosition *nal = (NALPosition *)sei->data();
 
     for (size_t i = 0; i < sei->size() / sizeof(NALPosition); ++i, ++nal) {
         trackAdded |= parseSEINalUnit(
@@ -208,9 +229,8 @@
 }
 
 // returns true if a new CC track is found
-bool NuPlayer::CCDecoder::parseSEINalUnit(
-        int64_t timeUs, const uint8_t *nalStart, size_t nalSize) {
-    unsigned nalType = nalStart[0] & 0x1f;
+bool NuPlayer::CCDecoder::parseSEINalUnit(int64_t timeUs, const uint8_t *data, size_t size) {
+    unsigned nalType = data[0] & 0x1f;
 
     // the buffer should only have SEI in it
     if (nalType != 6) {
@@ -218,7 +238,8 @@
     }
 
     bool trackAdded = false;
-    NALBitReader br(nalStart + 1, nalSize - 1);
+    NALBitReader br(data + 1, size - 1);
+
     // sei_message()
     while (br.atLeastNumBitsLeft(16)) { // at least 16-bit for sei_message()
         uint32_t payload_type = 0;
@@ -256,53 +277,7 @@
             }
 
             if (isCC && payload_size > 2) {
-                // MPEG_cc_data()
-                // ATSC A/53 Part 4: 6.2.3.1
-                br.skipBits(1); //process_em_data_flag
-                bool process_cc_data_flag = br.getBits(1);
-                br.skipBits(1); //additional_data_flag
-                size_t cc_count = br.getBits(5);
-                br.skipBits(8); // em_data;
-                payload_size -= 2;
-
-                if (process_cc_data_flag) {
-                    AString out;
-
-                    sp<ABuffer> ccBuf = new ABuffer(cc_count * sizeof(CCData));
-                    ccBuf->setRange(0, 0);
-
-                    for (size_t i = 0; i < cc_count && payload_size >= 3; i++) {
-                        uint8_t marker = br.getBits(5);
-                        CHECK_EQ(marker, 0x1f);
-
-                        bool cc_valid = br.getBits(1);
-                        uint8_t cc_type = br.getBits(2);
-                        // remove odd parity bit
-                        uint8_t cc_data_1 = br.getBits(8) & 0x7f;
-                        uint8_t cc_data_2 = br.getBits(8) & 0x7f;
-
-                        payload_size -= 3;
-
-                        if (cc_valid
-                                && (cc_type == 0 || cc_type == 1)) {
-                            CCData cc(cc_type, cc_data_1, cc_data_2);
-                            if (!isNullPad(&cc)) {
-                                size_t channel;
-                                if (cc.getChannel(&channel) && getTrackIndex(channel) < 0) {
-                                    mTrackIndices[channel] = mFoundChannels.size();
-                                    mFoundChannels.push_back(channel);
-                                    trackAdded = true;
-                                }
-                                memcpy(ccBuf->data() + ccBuf->size(),
-                                        (void *)&cc, sizeof(cc));
-                                ccBuf->setRange(0, ccBuf->size() + sizeof(CCData));
-                            }
-                        }
-                    }
-
-                    mCCMap.add(timeUs, ccBuf);
-                    break;
-                }
+                trackAdded |= parseMPEGCCData(timeUs, br.data(), br.numBitsLeft() / 8);
             } else {
                 ALOGV("Malformed SEI payload type 4");
             }
@@ -317,31 +292,202 @@
     return trackAdded;
 }
 
-sp<ABuffer> NuPlayer::CCDecoder::filterCCBuf(
-        const sp<ABuffer> &ccBuf, size_t index) {
-    sp<ABuffer> filteredCCBuf = new ABuffer(ccBuf->size());
-    filteredCCBuf->setRange(0, 0);
+// returns true if a new CC track is found
+bool NuPlayer::CCDecoder::extractFromMPEGUserData(const sp<ABuffer> &accessUnit) {
+    sp<ABuffer> mpegUserData;
+    if (!accessUnit->meta()->findBuffer("mpegUserData", &mpegUserData)
+            || mpegUserData == NULL) {
+        return false;
+    }
 
-    size_t cc_count = ccBuf->size() / sizeof(CCData);
-    const CCData* cc_data = (const CCData*)ccBuf->data();
+    int64_t timeUs;
+    CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+    bool trackAdded = false;
+
+    const size_t *userData = (size_t *)mpegUserData->data();
+
+    for (size_t i = 0; i < mpegUserData->size() / sizeof(size_t); ++i) {
+        trackAdded |= parseMPEGUserDataUnit(
+                timeUs, accessUnit->data() + userData[i], accessUnit->size() - userData[i]);
+    }
+
+    return trackAdded;
+}
+
+// returns true if a new CC track is found
+bool NuPlayer::CCDecoder::parseMPEGUserDataUnit(int64_t timeUs, const uint8_t *data, size_t size) {
+    if (size < 9) {
+        // Too short for user_data_start_code + user_identifier + user_data_type.
+        return false;
+    }
+
+    ABitReader br(data + 4, 5);
+
+    uint32_t user_identifier = br.getBits(32);
+    uint8_t user_data_type = br.getBits(8);
+
+    if (user_identifier == 'GA94' && user_data_type == 0x3) {
+        return parseMPEGCCData(timeUs, data + 9, size - 9);
+    }
+
+    return false;
+}
+
+// returns true if a new CC track is found
+bool NuPlayer::CCDecoder::parseMPEGCCData(int64_t timeUs, const uint8_t *data, size_t size) {
+    bool trackAdded = false;
+
+    // MPEG_cc_data()
+    // ATSC A/53 Part 4: 6.2.3.1
+    ABitReader br(data, size);
+
+    if (br.numBitsLeft() <= 16) {
+        return false;
+    }
+
+    br.skipBits(1); // process_em_data_flag
+    bool process_cc_data_flag = br.getBits(1);
+    br.skipBits(1); // additional_data_flag
+    size_t cc_count = br.getBits(5);
+    br.skipBits(8); // em_data
+
+    if (!process_cc_data_flag || 3 * 8 * cc_count >= br.numBitsLeft()) {
+        return false;
+    }
+
+    sp<ABuffer> line21CCBuf = NULL;
+
     for (size_t i = 0; i < cc_count; ++i) {
-        size_t channel;
-        if (cc_data[i].getChannel(&channel)) {
-            mCurrentChannel = channel;
-        }
-        if (mCurrentChannel == mFoundChannels[index]) {
-            memcpy(filteredCCBuf->data() + filteredCCBuf->size(),
-                    (void *)&cc_data[i], sizeof(CCData));
-            filteredCCBuf->setRange(0, filteredCCBuf->size() + sizeof(CCData));
+        br.skipBits(5);
+        bool cc_valid = br.getBits(1);
+        uint8_t cc_type = br.getBits(2);
+
+        if (cc_valid) {
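+            // ATSC A/53: cc_type 0/1 are line-21 (CEA-608) byte pairs for fields 1/2;
+            // cc_type 3 starts a new DTVCC (CEA-708) packet and cc_type 2 continues one.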
+            if (cc_type == 3) {
+                if (mDTVCCPacket->size() > 0) {
+                    trackAdded |= parseDTVCCPacket(
+                            timeUs, mDTVCCPacket->data(), mDTVCCPacket->size());
+                    mDTVCCPacket->setRange(0, 0);
+                }
+                memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
+                mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+                br.skipBits(16);
+            } else if (mDTVCCPacket->size() > 0 && cc_type == 2
+                    && mDTVCCPacket->size() + 2 <= mDTVCCPacket->capacity()) {
+                // The capacity check keeps a malformed stream from overflowing the packet buffer.
+                memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
+                mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+                br.skipBits(16);
+            } else if (cc_type == 0 || cc_type == 1) {
+                uint8_t cc_data_1 = br.getBits(8) & 0x7f;
+                uint8_t cc_data_2 = br.getBits(8) & 0x7f;
+
+                CCData cc(cc_type, cc_data_1, cc_data_2);
+
+                if (isNullPad(&cc)) {
+                    continue;
+                }
+
+                size_t channel;
+                if (cc.getChannel(&channel)) {
+                    mLine21Channels[cc_type] = channel;
+
+                    // create a new track if it does not exist.
+                    getTrackIndex(kTrackTypeCEA608, channel, &trackAdded);
+                }
+
+                if (isSelected() && mTracks[mSelectedTrack].mTrackType == kTrackTypeCEA608
+                        && mTracks[mSelectedTrack].mTrackChannel == mLine21Channels[cc_type]) {
+                    if (line21CCBuf == NULL) {
+                        line21CCBuf = new ABuffer((cc_count - i) * sizeof(CCData));
+                        line21CCBuf->setRange(0, 0);
+                    }
+                    memcpy(line21CCBuf->data() + line21CCBuf->size(), &cc, sizeof(cc));
+                    line21CCBuf->setRange(0, line21CCBuf->size() + sizeof(CCData));
+                }
+            } else {
+                br.skipBits(16);
+            }
+        } else {
+            if ((cc_type == 3 || cc_type == 2) && mDTVCCPacket->size() > 0) {
+                trackAdded |= parseDTVCCPacket(timeUs, mDTVCCPacket->data(), mDTVCCPacket->size());
+                mDTVCCPacket->setRange(0, 0);
+            }
+            br.skipBits(16);
         }
     }
 
-    return filteredCCBuf;
+    if (isSelected() && mTracks[mSelectedTrack].mTrackType == kTrackTypeCEA608
+            && line21CCBuf != NULL && line21CCBuf->size() > 0) {
+        mCCMap.add(timeUs, line21CCBuf);
+    }
+
+    return trackAdded;
+}
+
+// returns true if a new CC track is found
+bool NuPlayer::CCDecoder::parseDTVCCPacket(int64_t timeUs, const uint8_t *data, size_t size) {
+    // CEA-708B 5 DTVCC Packet Layer.
+    ABitReader br(data, size);
+    br.skipBits(2);
+
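+    // packet_size is in byte pairs; 0 encodes the maximum of 64 (CEA-708B section 5).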
+    size_t packet_size = br.getBits(6);
+    if (packet_size == 0) packet_size = 64;
+    packet_size *= 2;
+
+    if (size != packet_size) {
+        return false;
+    }
+
+    bool trackAdded = false;
+
+    while (br.numBitsLeft() >= 16) {
+        // CEA-708B Figure 5 and 6.
+        uint8_t service_number = br.getBits(3);
+        size_t block_size = br.getBits(5);
+
+        if (service_number == 7) {
+            // CEA-708B: a service_number of 7 escapes to a 6-bit extended service
+            // number (valid values 7-63); a 3-bit field can never equal 64.
+            br.skipBits(2);
+            service_number = br.getBits(6);
+
+            if (service_number < 7) {
+                return trackAdded;
+            }
+        }
+
+        if (br.numBitsLeft() < block_size * 8) {
+            return trackAdded;
+        }
+
+        if (block_size > 0) {
+            size_t trackIndex = getTrackIndex(kTrackTypeCEA708, service_number, &trackAdded);
+            if (mSelectedTrack == (ssize_t)trackIndex) {
+                sp<ABuffer> ccPacket = new ABuffer(block_size);
+                memcpy(ccPacket->data(), br.data(), block_size);
+                mCCMap.add(timeUs, ccPacket);
+            }
+        }
+        br.skipBits(block_size * 8);
+    }
+
+    return trackAdded;
+}
+
+// Returns the track index for the given type and channel, creating a new track
+// (and setting *trackAdded) if one does not already exist.
+size_t NuPlayer::CCDecoder::getTrackIndex(
+        int32_t trackType, size_t channel, bool *trackAdded) {
+    CCTrack track(trackType, channel);
+    ssize_t index = mTrackIndices.indexOfKey(track);
+
+    if (index < 0) {
+        // A new track is added.
+        index = mTracks.size();
+        mTrackIndices.add(track, index);
+        mTracks.add(track);
+        *trackAdded = true;
+        return index;
+    }
+
+    return mTrackIndices.valueAt(index);
 }
 
 void NuPlayer::CCDecoder::decode(const sp<ABuffer> &accessUnit) {
-    if (extractFromSEI(accessUnit)) {
-        ALOGI("Found CEA-608 track");
+    if (extractFromMPEGUserData(accessUnit) || extractFromSEI(accessUnit)) {
         sp<AMessage> msg = mNotify->dup();
         msg->setInt32("what", kWhatTrackAdded);
         msg->post();
@@ -350,8 +496,7 @@
 }
 
 void NuPlayer::CCDecoder::display(int64_t timeUs) {
-    if (!isTrackValid(mSelectedTrack)) {
-        ALOGE("Could not find current track(index=%d)", mSelectedTrack);
+    if (!isSelected()) {
         return;
     }
 
@@ -361,7 +506,26 @@
         return;
     }
 
-    sp<ABuffer> ccBuf = filterCCBuf(mCCMap.valueAt(index), mSelectedTrack);
+    sp<ABuffer> ccBuf;
+
+    if (index == 0) {
+        ccBuf = mCCMap.valueAt(index);
+    } else {
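+        // Concatenate every payload up to and including |index| so captions queued
+        // since the last display call are not dropped.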
+        size_t size = 0;
+
+        for (ssize_t i = 0; i <= index; ++i) {
+            size += mCCMap.valueAt(i)->size();
+        }
+
+        ccBuf = new ABuffer(size);
+        ccBuf->setRange(0, 0);
+
+        for (ssize_t i = 0; i <= index; ++i) {
+            sp<ABuffer> buf = mCCMap.valueAt(i);
+            memcpy(ccBuf->data() + ccBuf->size(), buf->data(), buf->size());
+            ccBuf->setRange(0, ccBuf->size() + buf->size());
+        }
+    }
 
     if (ccBuf->size() > 0) {
 #if 0
@@ -384,6 +548,25 @@
 
 void NuPlayer::CCDecoder::flush() {
     mCCMap.clear();
+    mDTVCCPacket->setRange(0, 0);
+}
+
+int32_t NuPlayer::CCDecoder::CCTrack::compare(const NuPlayer::CCDecoder::CCTrack& rhs) const {
+    int32_t cmp = mTrackType - rhs.mTrackType;
+    if (cmp != 0) return cmp;
+    return mTrackChannel - rhs.mTrackChannel;
+}
+
+bool NuPlayer::CCDecoder::CCTrack::operator<(const NuPlayer::CCDecoder::CCTrack& rhs) const {
+    return compare(rhs) < 0;
+}
+
+bool NuPlayer::CCDecoder::CCTrack::operator==(const NuPlayer::CCDecoder::CCTrack& rhs) const {
+    return compare(rhs) == 0;
+}
+
+bool NuPlayer::CCDecoder::CCTrack::operator!=(const NuPlayer::CCDecoder::CCTrack& rhs) const {
+    return compare(rhs) != 0;
 }
 
 }  // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
index 77fb0fe..a297334 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
@@ -28,6 +28,11 @@
         kWhatTrackAdded,
     };
 
+    enum {
+        kTrackTypeCEA608,
+        kTrackTypeCEA708,
+    };
+
     CCDecoder(const sp<AMessage> &notify);
 
     size_t getTrackCount() const;
@@ -39,18 +44,50 @@
     void flush();
 
 private:
+    // CC track identifier.
+    struct CCTrack {
+        CCTrack() : mTrackType(0), mTrackChannel(0) { }
+
+        CCTrack(const int32_t trackType, const size_t trackChannel)
+            : mTrackType(trackType), mTrackChannel(trackChannel) { }
+
+        int32_t mTrackType;
+        size_t mTrackChannel;
+
+        // CCTracks are ordered so they can key a map from track to index,
+        // which is used to look up the matching track when CC data arrives.
+        int32_t compare(const NuPlayer::CCDecoder::CCTrack& rhs) const;
+        inline bool operator<(const NuPlayer::CCDecoder::CCTrack& rhs) const;
+        inline bool operator==(const NuPlayer::CCDecoder::CCTrack& rhs) const;
+        inline bool operator!=(const NuPlayer::CCDecoder::CCTrack& rhs) const;
+    };
+
     sp<AMessage> mNotify;
     KeyedVector<int64_t, sp<ABuffer> > mCCMap;
-    size_t mCurrentChannel;
-    int32_t mSelectedTrack;
-    int32_t mTrackIndices[4];
-    Vector<size_t> mFoundChannels;
+    ssize_t mSelectedTrack;
+    KeyedVector<CCTrack, size_t> mTrackIndices;
+    Vector<CCTrack> mTracks;
+
+    // CEA-608 closed caption
+    size_t mLine21Channels[2]; // The current channels of NTSC_CC_FIELD_{1, 2}
+
+    // CEA-708 closed caption
+    sp<ABuffer> mDTVCCPacket;
 
     bool isTrackValid(size_t index) const;
-    int32_t getTrackIndex(size_t channel) const;
+    size_t getTrackIndex(int32_t trackType, size_t channel, bool *trackAdded);
+
+    // Extract from H.264 SEIs
     bool extractFromSEI(const sp<ABuffer> &accessUnit);
-    bool parseSEINalUnit(int64_t timeUs, const uint8_t *nalStart, size_t nalSize);
-    sp<ABuffer> filterCCBuf(const sp<ABuffer> &ccBuf, size_t index);
+    bool parseSEINalUnit(int64_t timeUs, const uint8_t *data, size_t size);
+
+    // Extract from MPEG user data
+    bool extractFromMPEGUserData(const sp<ABuffer> &accessUnit);
+    bool parseMPEGUserDataUnit(int64_t timeUs, const uint8_t *data, size_t size);
+
+    // Extract CC tracks from MPEG_cc_data
+    bool parseMPEGCCData(int64_t timeUs, const uint8_t *data, size_t size);
+    bool parseDTVCCPacket(int64_t timeUs, const uint8_t *data, size_t size);
 
     DISALLOW_EVIL_CONSTRUCTORS(CCDecoder);
 };
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index c005f3f..4678956 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -69,7 +69,6 @@
       mIsSecure(false),
       mFormatChangePending(false),
       mTimeChangePending(false),
-      mPaused(true),
       mResumePending(false),
       mComponentName("decoder") {
     mCodecLooper = new ALooper;
@@ -526,7 +525,10 @@
         ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
         msg->setBuffer("buffer", buffer);
         mCSDsToSubmit.removeAt(0);
-        CHECK(onInputBufferFetched(msg));
+        if (!onInputBufferFetched(msg)) {
+            handleError(UNKNOWN_ERROR);
+            return false;
+        }
         return true;
     }
 
@@ -863,7 +865,11 @@
 
         // copy into codec buffer
         if (buffer != codecBuffer) {
-            CHECK_LE(buffer->size(), codecBuffer->capacity());
+            if (buffer->size() > codecBuffer->capacity()) {
+                handleError(ERROR_BUFFER_TOO_SMALL);
+                mDequeuedInputBuffers.push_back(bufferIx);
+                return false;
+            }
             codecBuffer->setRange(0, buffer->size());
             memcpy(codecBuffer->data(), buffer->data(), buffer->size());
         }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index eeb4af4..ae08b4b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -91,7 +91,6 @@
     bool mFormatChangePending;
     bool mTimeChangePending;
 
-    bool mPaused;
     bool mResumePending;
     AString mComponentName;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
index 7e76842..04bb61c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
@@ -31,6 +31,7 @@
 NuPlayer::DecoderBase::DecoderBase(const sp<AMessage> &notify)
     :  mNotify(notify),
        mBufferGeneration(0),
+       mPaused(false),
        mStats(new AMessage),
        mRequestInputBuffersPending(false) {
     // Every decoder has its own looper because MediaCodec operations
@@ -83,6 +84,13 @@
     msg->post();
 }
 
+void NuPlayer::DecoderBase::pause() {
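+    // Post and wait for the reply so the decoder loop has observed the pause
+    // before the caller proceeds.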
+    sp<AMessage> msg = new AMessage(kWhatPause, this);
+
+    sp<AMessage> response;
+    PostAndAwaitResponse(msg, &response);
+}
+
 status_t NuPlayer::DecoderBase::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
     sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, this);
     msg->setPointer("buffers", buffers);
@@ -146,6 +154,17 @@
             break;
         }
 
+        case kWhatPause:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            mPaused = true;
+
+            (new AMessage)->postReply(replyID);
+            break;
+        }
+
         case kWhatGetInputBuffers:
         {
             sp<AReplyToken> replyID;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index b0dc01d..a334ec5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -36,6 +36,9 @@
     void init();
     void setParameters(const sp<AMessage> &params);
 
+    // Synchronous call to ensure decoder will not request or send out data.
+    void pause();
+
     void setRenderer(const sp<Renderer> &renderer);
     virtual status_t setVideoSurface(const sp<Surface> &) { return INVALID_OPERATION; }
 
@@ -78,6 +81,7 @@
 
     sp<AMessage> mNotify;
     int32_t mBufferGeneration;
+    bool mPaused;
     sp<AMessage> mStats;
 
 private:
@@ -85,6 +89,7 @@
         kWhatConfigure           = 'conf',
         kWhatSetParameters       = 'setP',
         kWhatSetRenderer         = 'setR',
+        kWhatPause               = 'paus',
         kWhatGetInputBuffers     = 'gInB',
         kWhatRequestInputBuffers = 'reqB',
         kWhatFlush               = 'flus',
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 30146c4..f224635 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -47,7 +47,6 @@
       mSource(source),
       mRenderer(renderer),
       mSkipRenderingUntilMediaTimeUs(-1ll),
-      mPaused(false),
       mReachedEOS(true),
       mPendingAudioErr(OK),
       mPendingBuffersToDrain(0),
@@ -224,6 +223,11 @@
         status_t err = dequeueAccessUnit(&accessUnit);
 
         if (err == -EWOULDBLOCK) {
+            // Flush out the aggregate buffer to try to avoid underrun.
+            accessUnit = aggregateBuffer(NULL /* accessUnit */);
+            if (accessUnit != NULL) {
+                break;
+            }
             return err;
         } else if (err != OK) {
             if (err == INFO_DISCONTINUITY) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
index db33e87..5850efa 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
@@ -52,7 +52,6 @@
     sp<Source> mSource;
     sp<Renderer> mRenderer;
     int64_t mSkipRenderingUntilMediaTimeUs;
-    bool mPaused;
 
     bool    mReachedEOS;
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index f288c36..0f4dce9 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -45,8 +45,7 @@
       mPlayerFlags(0),
       mAtEOS(false),
       mLooping(false),
-      mAutoLoop(false),
-      mStartupSeekTimeUs(-1) {
+      mAutoLoop(false) {
     ALOGV("NuPlayerDriver(%p)", this);
     mLooper->setName("NuPlayerDriver Looper");
 
@@ -244,7 +243,10 @@
 status_t NuPlayerDriver::start() {
     ALOGD("start(%p), state is %d, eos is %d", this, mState, mAtEOS);
     Mutex::Autolock autoLock(mLock);
+    return start_l();
+}
 
+status_t NuPlayerDriver::start_l() {
     switch (mState) {
         case STATE_UNPREPARED:
         {
@@ -261,25 +263,11 @@
 
         case STATE_PAUSED:
         case STATE_STOPPED_AND_PREPARED:
-        {
-            if (mAtEOS && mStartupSeekTimeUs < 0) {
-                mStartupSeekTimeUs = 0;
-                mPositionUs = -1;
-            }
-
-            // fall through
-        }
-
         case STATE_PREPARED:
         {
-            mAtEOS = false;
             mPlayer->start();
 
-            if (mStartupSeekTimeUs >= 0) {
-                mPlayer->seekToAsync(mStartupSeekTimeUs);
-                mStartupSeekTimeUs = -1;
-            }
-            break;
+            // fall through
         }
 
         case STATE_RUNNING:
@@ -330,12 +318,13 @@
 }
 
 status_t NuPlayerDriver::pause() {
+    ALOGD("pause(%p)", this);
     // The NuPlayerRenderer may get flushed if paused for long enough, e.g. the pause timeout tear
     // down for audio offload mode. If that happens, the NuPlayerRenderer will no longer know the
     // current position. So similar to seekTo, update |mPositionUs| to the pause position by calling
     // getCurrentPosition here.
-    int msec;
-    getCurrentPosition(&msec);
+    int unused;
+    getCurrentPosition(&unused);
 
     Mutex::Autolock autoLock(mLock);
 
@@ -364,14 +353,18 @@
 status_t NuPlayerDriver::setPlaybackSettings(const AudioPlaybackRate &rate) {
     status_t err = mPlayer->setPlaybackSettings(rate);
     if (err == OK) {
+        // try to update position
+        int unused;
+        getCurrentPosition(&unused);
         Mutex::Autolock autoLock(mLock);
         if (rate.mSpeed == 0.f && mState == STATE_RUNNING) {
             mState = STATE_PAUSED;
-            // try to update position
-            (void)mPlayer->getCurrentPosition(&mPositionUs);
             notifyListener_l(MEDIA_PAUSED);
-        } else if (rate.mSpeed != 0.f && mState == STATE_PAUSED) {
-            mState = STATE_RUNNING;
+        } else if (rate.mSpeed != 0.f
+                && (mState == STATE_PAUSED
+                    || mState == STATE_STOPPED_AND_PREPARED
+                    || mState == STATE_PREPARED)) {
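+            // A nonzero speed implies play: start playback from the paused or
+            // prepared states.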
+            err = start_l();
         }
     }
     return err;
@@ -390,7 +383,7 @@
 }
 
 status_t NuPlayerDriver::seekTo(int msec) {
-    ALOGD("seekTo(%p) %d ms", this, msec);
+    ALOGD("seekTo(%p) %d ms at state %d", this, msec, mState);
     Mutex::Autolock autoLock(mLock);
 
     int64_t seekTimeUs = msec * 1000ll;
@@ -399,8 +392,6 @@
         case STATE_PREPARED:
         case STATE_STOPPED_AND_PREPARED:
         case STATE_PAUSED:
-            mStartupSeekTimeUs = seekTimeUs;
-            // fall through.
         case STATE_RUNNING:
         {
             mAtEOS = false;
@@ -423,7 +414,7 @@
     int64_t tempUs = 0;
     {
         Mutex::Autolock autoLock(mLock);
-        if (mSeekInProgress || mState == STATE_PAUSED) {
+        if (mSeekInProgress || (mState == STATE_PAUSED && !mAtEOS)) {
             tempUs = (mPositionUs <= 0) ? 0 : mPositionUs;
             *msec = (int)divRound(tempUs, (int64_t)(1000));
             return OK;
@@ -458,7 +449,7 @@
 }
 
 status_t NuPlayerDriver::reset() {
-    ALOGD("reset(%p)", this);
+    ALOGD("reset(%p) at state %d", this, mState);
     Mutex::Autolock autoLock(mLock);
 
     switch (mState) {
@@ -501,7 +492,6 @@
 
     mDurationUs = -1;
     mPositionUs = -1;
-    mStartupSeekTimeUs = -1;
     mLooping = false;
 
     return OK;
@@ -725,7 +715,8 @@
 
 void NuPlayerDriver::notifyListener_l(
         int msg, int ext1, int ext2, const Parcel *in) {
-    ALOGD("notifyListener_l(%p), (%d, %d, %d)", this, msg, ext1, ext2);
+    ALOGD("notifyListener_l(%p), (%d, %d, %d), loop setting(%d, %d)",
+            this, msg, ext1, ext2, mAutoLoop, mLooping);
     switch (msg) {
         case MEDIA_PLAYBACK_COMPLETE:
         {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index d009fd7..26d3a60 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -122,9 +122,8 @@
     bool mLooping;
     bool mAutoLoop;
 
-    int64_t mStartupSeekTimeUs;
-
     status_t prepare_l();
+    status_t start_l();
     void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
 
     DISALLOW_EVIL_CONSTRUCTORS(NuPlayerDriver);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 4d25294..b47a4f1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include "NuPlayerRenderer.h"
+#include <algorithm>
 #include <cutils/properties.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -69,6 +70,11 @@
 // is closed to allow the audio DSP to power down.
 static const int64_t kOffloadPauseMaxUs = 10000000ll;
 
+// Maximum allowed delay from AudioSink, 1.5 seconds.
+static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
+
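+// Don't update the media clock anchor from audio timestamps more often than this.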
+static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
+
 // static
 const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
         AUDIO_CHANNEL_NONE,
@@ -86,6 +92,7 @@
         const sp<AMessage> &notify,
         uint32_t flags)
     : mAudioSink(sink),
+      mUseVirtualAudioSink(false),
       mNotify(notify),
       mFlags(flags),
       mNumFramesWritten(0),
@@ -95,6 +102,7 @@
       mVideoQueueGeneration(0),
       mAudioDrainGeneration(0),
       mVideoDrainGeneration(0),
+      mAudioEOSGeneration(0),
       mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
       mAudioFirstAnchorTimeMediaUs(-1),
       mAnchorTimeMediaUs(-1),
@@ -112,6 +120,8 @@
       mVideoRenderingStartGeneration(0),
       mAudioRenderingStartGeneration(0),
       mRenderingDataDelivered(false),
+      mNextAudioClockUpdateTimeUs(-1),
+      mLastAudioMediaTimeUs(-1),
       mAudioOffloadPauseTimeoutGeneration(0),
       mAudioTornDown(false),
       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
@@ -311,8 +321,33 @@
     msg->post();
 }
 
-// Called on any threads.
+// Called on any threads without mLock acquired.
 status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
+    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
+    if (result == OK) {
+        return result;
+    }
+
+    // MediaClock has not started yet. Try to start it if possible.
+    {
+        Mutex::Autolock autoLock(mLock);
+        if (mAudioFirstAnchorTimeMediaUs == -1) {
+            return result;
+        }
+
+        AudioTimestamp ts;
+        status_t res = mAudioSink->getTimestamp(ts);
+        if (res != OK) {
+            return result;
+        }
+
+        // AudioSink has rendered some frames.
+        int64_t nowUs = ALooper::GetNowUs();
+        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
+                + mAudioFirstAnchorTimeMediaUs;
+        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
+    }
+
     return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
 }
 
@@ -453,8 +488,14 @@
 
                 // Let's give it more data after about half that time
                 // has elapsed.
+                delayUs /= 2;
+                // check the buffer size to estimate maximum delay permitted.
+                const int64_t maxDrainDelayUs = std::max(
+                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
+                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
+                        (long long)delayUs, (long long)maxDrainDelayUs);
                 Mutex::Autolock autoLock(mLock);
-                postDrainAudioQueue_l(delayUs / 2);
+                postDrainAudioQueue_l(delayUs);
             }
             break;
         }
@@ -500,6 +541,19 @@
             break;
         }
 
+        case kWhatEOS:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("audioEOSGeneration", &generation));
+            if (generation != mAudioEOSGeneration) {
+                break;
+            }
+            status_t finalResult;
+            CHECK(msg->findInt32("finalResult", &finalResult));
+            notifyEOS(true /* audio */, finalResult);
+            break;
+        }
+
         case kWhatConfigPlayback:
         {
             sp<AReplyToken> replyID;
@@ -600,7 +654,10 @@
 
         case kWhatAudioTearDown:
         {
-            onAudioTearDown(kDueToError);
+            int32_t reason;
+            CHECK(msg->findInt32("reason", &reason));
+
+            onAudioTearDown((AudioTearDownReason)reason);
             break;
         }
 
@@ -694,7 +751,7 @@
         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
         {
             ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
-            me->notifyAudioTearDown();
+            me->notifyAudioTearDown(kDueToError);
             break;
         }
     }
@@ -755,7 +812,7 @@
     if (mAudioFirstAnchorTimeMediaUs >= 0) {
         int64_t nowUs = ALooper::GetNowUs();
         int64_t nowMediaUs =
-            mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
+            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
         // we don't know how much data we are queueing for offloaded tracks.
         mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
     }
@@ -864,6 +921,7 @@
                 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
             }
             notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
+            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
 
             mAudioQueue.erase(mAudioQueue.begin());
             entry = NULL;
@@ -898,13 +956,21 @@
                 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
                 // This can only happen when AudioSink was opened with doNotReconnect flag set to
                 // true, in which case the NuPlayer will handle the reconnect.
-                notifyAudioTearDown();
+                notifyAudioTearDown(kDueToError);
             }
             break;
         }
 
         entry->mOffset += written;
-        if (entry->mOffset == entry->mBuffer->size()) {
+        size_t remainder = entry->mBuffer->size() - entry->mOffset;
+        if ((ssize_t)remainder < mAudioSink->frameSize()) {
+            if (remainder > 0) {
+                ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
+                        remainder);
+                entry->mOffset += remainder;
+                copy -= remainder;
+            }
+
             entry->mNotifyConsumed->post();
             mAudioQueue.erase(mAudioQueue.begin());
 
@@ -932,7 +998,8 @@
             // AudioSink write is called in non-blocking mode.
             // It may return with a short count when:
             //
-            // 1) Size to be copied is not a multiple of the frame size. We consider this fatal.
+            // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
+            //    discarded.
             // 2) The data to be copied exceeds the available buffer in AudioSink.
             // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
             // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
@@ -973,7 +1040,16 @@
 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
 int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
     int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
-    return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs);
+    if (mUseVirtualAudioSink) {
+        // Take a fresh reading of the system clock instead of shadowing the
+        // caller-supplied nowUs.
+        int64_t virtualNowUs = ALooper::GetNowUs();
+        int64_t mediaUs;
+        if (mMediaClock->getMediaTime(virtualNowUs, &mediaUs) != OK) {
+            return 0ll;
+        } else {
+            return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
+        }
+    }
+    return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs);
 }
 
 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
@@ -994,9 +1070,39 @@
         return;
     }
     setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
+
+    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
+    if (mNextAudioClockUpdateTimeUs == -1) {
+        AudioTimestamp ts;
+        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
+            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
+        }
+    }
     int64_t nowUs = ALooper::GetNowUs();
-    int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
-    mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
+    if (mNextAudioClockUpdateTimeUs >= 0) {
+        if (nowUs >= mNextAudioClockUpdateTimeUs) {
+            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
+            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
+            mUseVirtualAudioSink = false;
+            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
+        }
+    } else {
+        int64_t unused;
+        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
+                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
+                        > kMaxAllowedAudioSinkDelayUs)) {
+            // Enough data has been sent to AudioSink, but AudioSink has not rendered
+            // any data yet. Something is wrong with AudioSink, e.g., the device is not
+            // connected to audio out.
+            // Switch to the system clock. This essentially creates a virtual AudioSink with
+            // an initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
+            // The virtual AudioSink renders audio data starting from the very first sample
+            // and is paced by the system clock.
+            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
+            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
+            mUseVirtualAudioSink = true;
+        }
+    }
     mAnchorNumFramesWritten = mNumFramesWritten;
     mAnchorTimeMediaUs = mediaTimeUs;
 }
@@ -1025,6 +1131,7 @@
         return;
     }
 
+    bool needRepostDrainVideoQueue = false;
     int64_t delayUs;
     int64_t nowUs = ALooper::GetNowUs();
     int64_t realTimeUs;
@@ -1042,8 +1149,17 @@
                 mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
                 mAnchorTimeMediaUs = mediaTimeUs;
                 realTimeUs = nowUs;
-            } else {
+            } else if (!mVideoSampleReceived) {
+                // Always render the first video frame.
+                realTimeUs = nowUs;
+            } else if (mAudioFirstAnchorTimeMediaUs < 0
+                || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) {
                 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
+            } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) {
+                needRepostDrainVideoQueue = true;
+                realTimeUs = nowUs;
+            } else {
+                realTimeUs = nowUs;
             }
         }
         if (!mHasAudio) {
@@ -1056,15 +1172,25 @@
         // received after this buffer, repost in 10 msec. Otherwise repost
         // in 500 msec.
         delayUs = realTimeUs - nowUs;
+        int64_t postDelayUs = -1;
         if (delayUs > 500000) {
-            int64_t postDelayUs = 500000;
+            postDelayUs = 500000;
             if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
                 postDelayUs = 10000;
             }
+        } else if (needRepostDrainVideoQueue) {
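+            // The media clock has no anchor yet; repost after the media-time gap from
+            // the first audio anchor, scaled by the playback rate.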
+            // CHECK(mPlaybackRate > 0);
+            // CHECK(mAudioFirstAnchorTimeMediaUs >= 0);
+            // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0);
+            postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs;
+            postDelayUs /= mPlaybackRate;
+        }
+
+        if (postDelayUs >= 0) {
             msg->setWhat(kWhatPostDrainVideoQueue);
             msg->post(postDelayUs);
             mVideoScheduler->restart();
-            ALOGI("possible video time jump of %dms, retrying in %dms",
+            ALOGI("possible video time jump of %dms or uninitialized media clock, retrying in %dms",
                     (int)(delayUs / 1000), (int)(postDelayUs / 1000));
             mDrainVideoQueuePending = true;
             return;
@@ -1102,24 +1228,20 @@
         return;
     }
 
-    int64_t nowUs = -1;
+    int64_t nowUs = ALooper::GetNowUs();
     int64_t realTimeUs;
+    int64_t mediaTimeUs = -1;
     if (mFlags & FLAG_REAL_TIME) {
         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
     } else {
-        int64_t mediaTimeUs;
         CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
 
-        nowUs = ALooper::GetNowUs();
         realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
     }
 
     bool tooLate = false;
 
     if (!mPaused) {
-        if (nowUs == -1) {
-            nowUs = ALooper::GetNowUs();
-        }
         setVideoLateByUs(nowUs - realTimeUs);
         tooLate = (mVideoLateByUs > 40000);
 
@@ -1132,6 +1254,14 @@
             ALOGV("rendering video at media time %.2f secs",
                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
                     mediaUs) / 1E6);
+
+            if (!(mFlags & FLAG_REAL_TIME)
+                    && mLastAudioMediaTimeUs != -1
+                    && mediaTimeUs > mLastAudioMediaTimeUs) {
+                // If audio ends before video, video continues to drive media clock.
+                // Also smooth out videos >= 10fps.
+                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
+            }
         }
     } else {
         setVideoLateByUs(0);
@@ -1143,6 +1273,12 @@
         }
     }
 
+    // Always render the first video frame while keeping stats on A/V sync.
+    if (!mVideoSampleReceived) {
+        realTimeUs = nowUs;
+        tooLate = false;
+    }
+
     entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
     entry->mNotifyConsumed->setInt32("render", !tooLate);
     entry->mNotifyConsumed->post();
@@ -1168,6 +1304,13 @@
 }
 
 void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
+    if (audio && delayUs > 0) {
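+        // Route the delayed audio EOS through the renderer's looper so a flush,
+        // which bumps mAudioEOSGeneration, can cancel it.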
+        sp<AMessage> msg = new AMessage(kWhatEOS, this);
+        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
+        msg->setInt32("finalResult", finalResult);
+        msg->post(delayUs);
+        return;
+    }
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatEOS);
     notify->setInt32("audio", static_cast<int32_t>(audio));
@@ -1175,8 +1318,10 @@
     notify->post(delayUs);
 }
 
-void NuPlayer::Renderer::notifyAudioTearDown() {
-    (new AMessage(kWhatAudioTearDown, this))->post();
+void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
+    sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
+    msg->setInt32("reason", reason);
+    msg->post();
 }
 
 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
@@ -1318,6 +1463,7 @@
         if (audio) {
             notifyComplete = mNotifyCompleteAudio;
             mNotifyCompleteAudio = false;
+            mLastAudioMediaTimeUs = -1;
         } else {
             notifyComplete = mNotifyCompleteVideo;
             mNotifyCompleteVideo = false;
@@ -1342,6 +1488,7 @@
             flushQueue(&mAudioQueue);
 
             ++mAudioDrainGeneration;
+            ++mAudioEOSGeneration;
             prepareForMediaRenderingStart_l();
 
             // the frame count will be reset after flush.
@@ -1375,6 +1522,7 @@
             }
             mNumFramesWritten = 0;
         }
+        mNextAudioClockUpdateTimeUs = -1;
     } else {
         flushQueue(&mVideoQueue);
 
@@ -1484,10 +1632,9 @@
     mDrainAudioQueuePending = false;
     mDrainVideoQueuePending = false;
 
-    if (mHasAudio) {
-        mAudioSink->pause();
-        startAudioOffloadPauseTimeout();
-    }
+    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
+    mAudioSink->pause();
+    startAudioOffloadPauseTimeout();
 
     ALOGV("now paused audio queue has %zu entries, video has %zu entries",
           mAudioQueue.size(), mVideoQueue.size());
@@ -1498,12 +1645,13 @@
         return;
     }
 
-    if (mHasAudio) {
-        cancelAudioOffloadPauseTimeout();
+    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
+    cancelAudioOffloadPauseTimeout();
+    if (mAudioSink->ready()) {
         status_t err = mAudioSink->start();
         if (err != OK) {
             ALOGE("cannot start AudioSink err %d", err);
-            notifyAudioTearDown();
+            notifyAudioTearDown(kDueToError);
         }
     }
 
@@ -1553,70 +1701,6 @@
     return mSyncQueues;
 }
 
-// TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs()
-// as it acquires locks and may query the audio driver.
-//
-// Some calls could conceivably retrieve extrapolated data instead of
-// accessing getTimestamp() or getPosition() every time a data buffer with
-// a media time is received.
-//
-// Calculate duration of played samples if played at normal rate (i.e., 1.0).
-int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
-    uint32_t numFramesPlayed;
-    int64_t numFramesPlayedAt;
-    AudioTimestamp ts;
-    static const int64_t kStaleTimestamp100ms = 100000;
-
-    status_t res = mAudioSink->getTimestamp(ts);
-    if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
-        numFramesPlayed = ts.mPosition;
-        numFramesPlayedAt =
-            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
-        const int64_t timestampAge = nowUs - numFramesPlayedAt;
-        if (timestampAge > kStaleTimestamp100ms) {
-            // This is an audio FIXME.
-            // getTimestamp returns a timestamp which may come from audio mixing threads.
-            // After pausing, the MixerThread may go idle, thus the mTime estimate may
-            // become stale. Assuming that the MixerThread runs 20ms, with FastMixer at 5ms,
-            // the max latency should be about 25ms with an average around 12ms (to be verified).
-            // For safety we use 100ms.
-            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
-                    (long long)nowUs, (long long)numFramesPlayedAt);
-            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
-        }
-        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
-    } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
-        numFramesPlayed = 0;
-        numFramesPlayedAt = nowUs;
-        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
-        //        numFramesPlayed, (long long)numFramesPlayedAt);
-    } else {                         // case 3: transitory at new track or audio fast tracks.
-        res = mAudioSink->getPosition(&numFramesPlayed);
-        CHECK_EQ(res, (status_t)OK);
-        numFramesPlayedAt = nowUs;
-        numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
-        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
-    }
-
-    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
-    int64_t durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
-            + nowUs - numFramesPlayedAt;
-    if (durationUs < 0) {
-        // Occurs when numFramesPlayed position is very small and the following:
-        // (1) In case 1, the time nowUs is computed before getTimestamp() is called and
-        //     numFramesPlayedAt is greater than nowUs by time more than numFramesPlayed.
-        // (2) In case 3, using getPosition and adding mAudioSink->latency() to
-        //     numFramesPlayedAt, by a time amount greater than numFramesPlayed.
-        //
-        // Both of these are transitory conditions.
-        ALOGV("getPlayedOutAudioDurationUs: negative duration %lld set to zero", (long long)durationUs);
-        durationUs = 0;
-    }
-    ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
-            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
-    return durationUs;
-}
-
 void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
     if (mAudioTornDown) {
         return;
@@ -1647,10 +1731,16 @@
 }
 
 void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
-    if (offloadingAudio()) {
-        mWakeLock->release(true);
-        ++mAudioOffloadPauseTimeoutGeneration;
-    }
+    // We may have called startAudioOffloadPauseTimeout() without
+    // the AudioSink open and with offloadingAudio enabled.
+    //
+    // By the time we cancel, offloadingAudio may already have been disabled, so we always
+    // release the wakelock and increment the pause timeout generation regardless.
+    //
+    // Note: The acquired wakelock prevents the device from suspending
+    // immediately after offload pause (in case a resume happens shortly thereafter).
+    mWakeLock->release(true);
+    ++mAudioOffloadPauseTimeoutGeneration;
 }
 
 status_t NuPlayer::Renderer::onOpenAudioSink(
@@ -1689,7 +1779,7 @@
                     mime.c_str(), audioFormat);
 
             int avgBitRate = -1;
-            format->findInt32("bit-rate", &avgBitRate);
+            format->findInt32("bitrate", &avgBitRate);
 
             int32_t aacProfile = -1;
             if (audioFormat == AUDIO_FORMAT_AAC
@@ -1760,6 +1850,9 @@
                 onDisableOffloadAudio();
                 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
                 ALOGV("openAudioSink: offload failed");
+                if (offloadOnly) {
+                    notifyAudioTearDown(kForceNonOffload);
+                }
             } else {
                 mUseAudioCallback = true;  // offload mode transfers data through callback
                 ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
@@ -1805,6 +1898,10 @@
         // NuPlayer a chance to switch from non-offload mode to offload mode.
         // So we only set doNotReconnect when there's no video.
         const bool doNotReconnect = !hasVideo;
+
+        // We should always be able to set our playback settings if the sink is closed.
+        LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
+                "onOpenAudioSink: can't set playback rate on closed sink");
         status_t err = mAudioSink->open(
                     sampleRate,
                     numChannels,
@@ -1817,9 +1914,6 @@
                     NULL,
                     doNotReconnect,
                     frameCount);
-        if (err == OK) {
-            err = mAudioSink->setPlaybackRate(mPlaybackSettings);
-        }
         if (err != OK) {
             ALOGW("openAudioSink: non offloaded open failed status: %d", err);
             mAudioSink->close();
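
The flush path above bumps mAudioEOSGeneration (alongside the existing drain generations) so that any already-posted drain/EOS messages are silently dropped. A minimal standalone sketch of that generation-counter idiom follows; the names and the toy queue are illustrative, not part of this change:

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <queue>

    struct Message {
        int32_t generation;            // stamped when the message is posted
        std::function<void()> work;
    };

    class Renderer {
    public:
        void postDrain() {
            mQueue.push({mAudioDrainGeneration, [] { std::puts("drain audio queue"); }});
        }
        void flushAudio() {
            // Bumping the generation invalidates every message posted before the flush.
            ++mAudioDrainGeneration;
        }
        void deliverAll() {
            while (!mQueue.empty()) {
                Message m = mQueue.front();
                mQueue.pop();
                if (m.generation != mAudioDrainGeneration) {
                    continue;  // stale: posted before the last flush, drop it
                }
                m.work();
            }
        }
    private:
        int32_t mAudioDrainGeneration = 0;
        std::queue<Message> mQueue;
    };

    int main() {
        Renderer r;
        r.postDrain();
        r.flushAudio();   // the pending drain is now stale
        r.postDrain();
        r.deliverAll();   // only the second drain runs
        return 0;
    }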
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 9479c31..004e21c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -92,8 +92,9 @@
     };
 
     enum AudioTearDownReason {
-        kDueToError = 0,
+        kDueToError = 0,   // Could restart with either offload or non-offload.
         kDueToTimeout,
+        kForceNonOffload,  // Restart only with non-offload.
     };
 
 protected:
@@ -134,6 +135,7 @@
     static const int64_t kMinPositionUpdateDelayUs;
 
     sp<MediaPlayerBase::AudioSink> mAudioSink;
+    bool mUseVirtualAudioSink;
     sp<AMessage> mNotify;
     Mutex mLock;
     uint32_t mFlags;
@@ -148,6 +150,7 @@
     int32_t mVideoQueueGeneration;
     int32_t mAudioDrainGeneration;
     int32_t mVideoDrainGeneration;
+    int32_t mAudioEOSGeneration;
 
     sp<MediaClock> mMediaClock;
     float mPlaybackRate; // audio track rate
@@ -178,7 +181,9 @@
     int32_t mAudioRenderingStartGeneration;
     bool mRenderingDataDelivered;
 
-    int64_t mLastPositionUpdateUs;
+    int64_t mNextAudioClockUpdateTimeUs;
+    // the media timestamp of the last audio sample right before EOS.
+    int64_t mLastAudioMediaTimeUs;
 
     int32_t mAudioOffloadPauseTimeoutGeneration;
     bool mAudioTornDown;
@@ -212,7 +217,6 @@
     bool onDrainAudioQueue();
     void drainAudioQueueUntilLastEOS();
     int64_t getPendingAudioPlayoutDurationUs(int64_t nowUs);
-    int64_t getPlayedOutAudioDurationUs(int64_t nowUs);
     void postDrainAudioQueue_l(int64_t delayUs = 0);
 
     void clearAnchorTime_l();
@@ -259,7 +263,7 @@
     void notifyPosition();
     void notifyVideoLateBy(int64_t lateByUs);
     void notifyVideoRenderingStart();
-    void notifyAudioTearDown();
+    void notifyAudioTearDown(AudioTearDownReason reason);
 
     void flushQueue(List<QueueEntry> *queue);
     bool dropBufferIfStale(bool audio, const sp<AMessage> &msg);
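
kForceNonOffload signals that the offloaded open itself failed, so retrying with offload would just loop. A hedged sketch of how a tear-down handler might branch on the reason; restartAudioSink() is a hypothetical stand-in for the player's real restart path:

    #include <cstdio>

    enum AudioTearDownReason {
        kDueToError = 0,   // could restart with either offload or non-offload
        kDueToTimeout,     // offload pause timer fired
        kForceNonOffload,  // offloaded open failed; restart only with non-offload
    };

    // Hypothetical stand-in for the player's actual sink-restart path.
    static void restartAudioSink(bool allowOffload) {
        std::printf("reopening AudioSink, offload %s\n",
                    allowOffload ? "allowed" : "disallowed");
    }

    void handleAudioTearDown(AudioTearDownReason reason) {
        // Only kForceNonOffload pins the restart to the non-offloaded path,
        // preventing an endless offload-open/fail/tear-down loop.
        restartAudioSink(reason != kForceNonOffload /* allowOffload */);
    }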
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 11a6a9f..0176eafa 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -46,8 +46,6 @@
         kWhatFlagsChanged,
         kWhatVideoSizeChanged,
         kWhatBufferingUpdate,
-        kWhatBufferingStart,
-        kWhatBufferingEnd,
         kWhatPauseOnBufferingStart,
         kWhatResumeOnBufferingEnd,
         kWhatCacheStats,
@@ -122,6 +120,8 @@
         return true;
     }
 
+    virtual void setOffloadAudio(bool /* offload */) {}
+
 protected:
     virtual ~Source() {}
 
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index af0351e..1b7dff5 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -32,6 +32,12 @@
 
 const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
 
+// Buffer Underflow/Prepare/StartServer/Overflow Marks
+const int64_t NuPlayer::RTSPSource::kUnderflowMarkUs   =  1000000ll;
+const int64_t NuPlayer::RTSPSource::kPrepareMarkUs     =  3000000ll;
+const int64_t NuPlayer::RTSPSource::kStartServerMarkUs =  5000000ll;
+const int64_t NuPlayer::RTSPSource::kOverflowMarkUs    = 10000000ll;
+
 NuPlayer::RTSPSource::RTSPSource(
         const sp<AMessage> &notify,
         const sp<IMediaHTTPService> &httpService,
@@ -51,6 +57,7 @@
       mFinalResult(OK),
       mDisconnectReplyID(0),
       mBuffering(false),
+      mInPreparationPhase(true),
       mSeekGeneration(0),
       mEOSTimeoutAudio(0),
       mEOSTimeoutVideo(0) {
@@ -76,6 +83,11 @@
 }
 
 void NuPlayer::RTSPSource::prepareAsync() {
+    if (mIsSDP && mHTTPService == NULL) {
+        notifyPrepared(BAD_VALUE);
+        return;
+    }
+
     if (mLooper == NULL) {
         mLooper = new ALooper;
         mLooper->setName("rtsp");
@@ -122,29 +134,6 @@
     msg->postAndAwaitResponse(&dummy);
 }
 
-void NuPlayer::RTSPSource::pause() {
-    int64_t mediaDurationUs = 0;
-    getDuration(&mediaDurationUs);
-    for (size_t index = 0; index < mTracks.size(); index++) {
-        TrackInfo *info = &mTracks.editItemAt(index);
-        sp<AnotherPacketSource> source = info->mSource;
-
-        // Check if EOS or ERROR is received
-        if (source != NULL && source->isFinished(mediaDurationUs)) {
-            return;
-        }
-    }
-    if (mHandler != NULL) {
-        mHandler->pause();
-    }
-}
-
-void NuPlayer::RTSPSource::resume() {
-    if (mHandler != NULL) {
-        mHandler->resume();
-    }
-}
-
 status_t NuPlayer::RTSPSource::feedMoreTSData() {
     Mutex::Autolock _l(mBufferingLock);
     return mFinalResult;
@@ -319,6 +308,73 @@
     mHandler->seek(seekTimeUs);
 }
 
+void NuPlayer::RTSPSource::schedulePollBuffering() {
+    sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
+    msg->post(1000000ll); // 1 second intervals
+}
+
+void NuPlayer::RTSPSource::checkBuffering(
+        bool *prepared, bool *underflow, bool *overflow, bool *startServer) {
+    size_t numTracks = mTracks.size();
+    size_t preparedCount, underflowCount, overflowCount, startCount;
+    preparedCount = underflowCount = overflowCount = startCount = 0;
+    for (size_t i = 0; i < numTracks; ++i) {
+        status_t finalResult;
+        TrackInfo *info = &mTracks.editItemAt(i);
+        sp<AnotherPacketSource> src = info->mSource;
+        int64_t bufferedDurationUs = src->getBufferedDurationUs(&finalResult);
+
+        // isFinished() with a duration of 0 checks only for an EOS result
+        if (bufferedDurationUs > kPrepareMarkUs || src->isFinished(/* duration */ 0)) {
+            ++preparedCount;
+        }
+
+        if (src->isFinished(/* duration */ 0)) {
+            ++overflowCount;
+        } else {
+            if (bufferedDurationUs < kUnderflowMarkUs) {
+                ++underflowCount;
+            }
+            if (bufferedDurationUs > kOverflowMarkUs) {
+                ++overflowCount;
+            }
+            if (bufferedDurationUs < kStartServerMarkUs) {
+                ++startCount;
+            }
+        }
+    }
+
+    *prepared    = (preparedCount == numTracks);
+    *underflow   = (underflowCount > 0);
+    *overflow    = (overflowCount == numTracks);
+    *startServer = (startCount > 0);
+}
+
+void NuPlayer::RTSPSource::onPollBuffering() {
+    bool prepared, underflow, overflow, startServer;
+    checkBuffering(&prepared, &underflow, &overflow, &startServer);
+
+    if (prepared && mInPreparationPhase) {
+        mInPreparationPhase = false;
+        notifyPrepared();
+    }
+
+    if (!mInPreparationPhase && underflow) {
+        startBufferingIfNecessary();
+    }
+
+    if (overflow && mHandler != NULL) {
+        stopBufferingIfNecessary();
+        mHandler->pause();
+    }
+
+    if (startServer && mHandler != NULL) {
+        mHandler->resume();
+    }
+
+    schedulePollBuffering();
+}
+
 void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
     if (msg->what() == kWhatDisconnect) {
         sp<AReplyToken> replyID;
@@ -343,6 +399,9 @@
 
         performSeek(seekTimeUs);
         return;
+    } else if (msg->what() == kWhatPollBuffering) {
+        onPollBuffering();
+        return;
     }
 
     CHECK_EQ(msg->what(), (int)kWhatNotify);
@@ -367,7 +426,7 @@
             }
 
             notifyFlagsChanged(flags);
-            notifyPrepared();
+            schedulePollBuffering();
             break;
         }
 
@@ -380,10 +439,8 @@
         case MyHandler::kWhatSeekDone:
         {
             mState = CONNECTED;
-            if (mSeekReplyID != NULL) {
-                // Unblock seekTo here in case we attempted to seek in a live stream
-                finishSeek(OK);
-            }
+            // Unblock seekTo here in case we attempted to seek in a live stream
+            finishSeek(OK);
             break;
         }
 
@@ -404,12 +461,13 @@
 
             status_t err = OK;
             msg->findInt32("err", &err);
-            finishSeek(err);
 
             if (err == OK) {
                 int64_t timeUs;
                 CHECK(msg->findInt64("time", &timeUs));
                 mHandler->continueSeekAfterPause(timeUs);
+            } else {
+                finishSeek(err);
             }
             break;
         }
@@ -720,7 +778,7 @@
         mBuffering = true;
 
         sp<AMessage> notify = dupNotify();
-        notify->setInt32("what", kWhatBufferingStart);
+        notify->setInt32("what", kWhatPauseOnBufferingStart);
         notify->post();
     }
 }
@@ -736,7 +794,7 @@
         mBuffering = false;
 
         sp<AMessage> notify = dupNotify();
-        notify->setInt32("what", kWhatBufferingEnd);
+        notify->setInt32("what", kWhatResumeOnBufferingEnd);
         notify->post();
     }
 
@@ -744,7 +802,9 @@
 }
 
 void NuPlayer::RTSPSource::finishSeek(status_t err) {
-    CHECK(mSeekReplyID != NULL);
+    if (mSeekReplyID == NULL) {
+        return;
+    }
     sp<AMessage> seekReply = new AMessage;
     seekReply->setInt32("err", err);
     seekReply->postReply(mSeekReplyID);
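
The new poll loop classifies every track against four watermarks: declare prepared above kPrepareMarkUs, rebuffer below kUnderflowMarkUs, ask the RTSP server to resume below kStartServerMarkUs, and pause it above kOverflowMarkUs. A self-contained sketch of that classification; the Track struct is assumed for illustration and the thresholds mirror the constants above:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Track {
        int64_t bufferedUs;  // getBufferedDurationUs() result
        bool finished;       // isFinished(0): EOS already queued
    };

    struct BufferingState {
        bool prepared, underflow, overflow, startServer;
    };

    constexpr int64_t kUnderflowMarkUs   =  1000000;  // rebuffer below this
    constexpr int64_t kPrepareMarkUs     =  3000000;  // "prepared" above this
    constexpr int64_t kStartServerMarkUs =  5000000;  // resume server below this
    constexpr int64_t kOverflowMarkUs    = 10000000;  // pause server above this

    BufferingState checkBuffering(const std::vector<Track> &tracks) {
        size_t prepared = 0, underflow = 0, overflow = 0, start = 0;
        for (const Track &t : tracks) {
            if (t.bufferedUs > kPrepareMarkUs || t.finished) ++prepared;
            if (t.finished) { ++overflow; continue; }  // a finished track never underflows
            if (t.bufferedUs < kUnderflowMarkUs)   ++underflow;
            if (t.bufferedUs > kOverflowMarkUs)    ++overflow;
            if (t.bufferedUs < kStartServerMarkUs) ++start;
        }
        return { prepared  == tracks.size(),  // every track is past the prepare mark
                 underflow  > 0,              // any track is starving
                 overflow  == tracks.size(),  // every track is saturated or done
                 start      > 0 };            // any track wants the server running
    }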
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 6438a1e..a6a7644 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -43,8 +43,6 @@
     virtual void prepareAsync();
     virtual void start();
     virtual void stop();
-    virtual void pause();
-    virtual void resume();
 
     virtual status_t feedMoreTSData();
 
@@ -65,6 +63,7 @@
         kWhatNotify          = 'noti',
         kWhatDisconnect      = 'disc',
         kWhatPerformSeek     = 'seek',
+        kWhatPollBuffering   = 'poll',
     };
 
     enum State {
@@ -79,6 +78,12 @@
         kFlagIncognito = 1,
     };
 
+    // Buffer Prepare/Underflow/Overflow/StartServer Marks
+    static const int64_t kPrepareMarkUs;
+    static const int64_t kUnderflowMarkUs;
+    static const int64_t kOverflowMarkUs;
+    static const int64_t kStartServerMarkUs;
+
     struct TrackInfo {
         sp<AnotherPacketSource> mSource;
 
@@ -100,6 +105,7 @@
     sp<AReplyToken> mDisconnectReplyID;
     Mutex mBufferingLock;
     bool mBuffering;
+    bool mInPreparationPhase;
 
     sp<ALooper> mLooper;
     sp<MyHandler> mHandler;
@@ -126,6 +132,9 @@
     void finishDisconnectIfPossible();
 
     void performSeek(int64_t seekTimeUs);
+    void schedulePollBuffering();
+    void checkBuffering(bool *prepared, bool *underflow, bool *overflow, bool *startServer);
+    void onPollBuffering();
 
     bool haveSufficientDataOnAllTracks();
 
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index 0246b59..c4147e1 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -29,9 +29,12 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 
 namespace android {
 
+const int32_t kNumListenerQueuePackets = 80;
+
 NuPlayer::StreamingSource::StreamingSource(
         const sp<AMessage> &notify,
         const sp<IStreamSource> &source)
@@ -84,7 +87,7 @@
 }
 
 void NuPlayer::StreamingSource::onReadBuffer() {
-    for (int32_t i = 0; i < 50; ++i) {
+    for (int32_t i = 0; i < kNumListenerQueuePackets; ++i) {
         char buffer[188];
         sp<AMessage> extra;
         ssize_t n = mStreamListener->read(buffer, sizeof(buffer), &extra);
@@ -215,14 +218,21 @@
     return static_cast<AnotherPacketSource *>(source.get());
 }
 
-sp<MetaData> NuPlayer::StreamingSource::getFormatMeta(bool audio) {
+sp<AMessage> NuPlayer::StreamingSource::getFormat(bool audio) {
     sp<AnotherPacketSource> source = getSource(audio);
 
+    sp<AMessage> format = new AMessage;
     if (source == NULL) {
-        return NULL;
+        format->setInt32("err", -EWOULDBLOCK);
+        return format;
     }
 
-    return source->getFormat();
+    sp<MetaData> meta = source->getFormat();
+    status_t err = convertMetaDataToMessage(meta, &format);
+    if (err != OK) {
+        format->setInt32("err", err);
+    }
+    return format;
 }
 
 status_t NuPlayer::StreamingSource::dequeueAccessUnit(
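
With this change getFormat() never returns NULL for a missing format; the error travels inside the AMessage instead. A hedged sketch of the caller-side check this implies (assumes the stagefright foundation headers; isUsableFormat is a hypothetical helper):

    #include <cstdint>
    #include <media/stagefright/foundation/AMessage.h>

    using android::AMessage;
    using android::sp;

    // Hypothetical helper: a format message is usable only when the source did
    // not embed an "err" entry (e.g. -EWOULDBLOCK for "not ready yet").
    static bool isUsableFormat(const sp<AMessage> &format) {
        int32_t err;
        return format != NULL && !format->findInt32("err", &err);
    }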
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.h b/media/libmediaplayerservice/nuplayer/StreamingSource.h
index 1f95f3c..db88c7f 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.h
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.h
@@ -46,7 +46,7 @@
 
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
-    virtual sp<MetaData> getFormatMeta(bool audio);
+    virtual sp<AMessage> getFormat(bool audio);
 
 private:
     enum {
diff --git a/media/libmediaplayerservice/tests/Android.mk b/media/libmediaplayerservice/tests/Android.mk
index 8cbf782..ea75a97 100644
--- a/media/libmediaplayerservice/tests/Android.mk
+++ b/media/libmediaplayerservice/tests/Android.mk
@@ -12,6 +12,7 @@
 LOCAL_SHARED_LIBRARIES := \
 	liblog \
 	libmediaplayerservice \
+	libmediadrm \
 	libutils \
 
 LOCAL_C_INCLUDES := \
diff --git a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
index de350a1..c5212fc 100644
--- a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
+++ b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
@@ -20,9 +20,9 @@
 
 #include <gtest/gtest.h>
 
-#include "Drm.h"
-#include "DrmSessionClientInterface.h"
-#include "DrmSessionManager.h"
+#include <media/Drm.h>
+#include <media/DrmSessionClientInterface.h>
+#include <media/DrmSessionManager.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/ProcessInfoInterface.h>
 
@@ -39,6 +39,10 @@
         return true;
     }
 
+    virtual bool isValidPid(int /* pid */) {
+        return true;
+    }
+
 private:
     DISALLOW_EVIL_CONSTRUCTORS(FakeProcessInfo);
 };
diff --git a/media/libnbaio/Android.mk b/media/libnbaio/Android.mk
index 1353f28..e2f416b 100644
--- a/media/libnbaio/Android.mk
+++ b/media/libnbaio/Android.mk
@@ -20,19 +20,17 @@
 #LOCAL_C_INCLUDES += path/to/libsndfile/src
 #LOCAL_STATIC_LIBRARIES += libsndfile
 
-# uncomment for systrace
-# LOCAL_CFLAGS += -DATRACE_TAG=ATRACE_TAG_AUDIO
-
 LOCAL_MODULE := libnbaio
 
 LOCAL_SHARED_LIBRARIES := \
     libaudioutils \
     libbinder \
-    libcommon_time_client \
     libcutils \
     libutils \
     liblog
 
 LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
 
+LOCAL_CFLAGS := -Werror -Wall
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libnbaio/AudioBufferProviderSource.cpp b/media/libnbaio/AudioBufferProviderSource.cpp
index 551f516..cba8b59 100644
--- a/media/libnbaio/AudioBufferProviderSource.cpp
+++ b/media/libnbaio/AudioBufferProviderSource.cpp
@@ -46,16 +46,14 @@
     return mBuffer.raw != NULL ? mBuffer.frameCount - mConsumed : 0;
 }
 
-ssize_t AudioBufferProviderSource::read(void *buffer,
-                                        size_t count,
-                                        int64_t readPTS)
+ssize_t AudioBufferProviderSource::read(void *buffer, size_t count)
 {
     if (CC_UNLIKELY(!mNegotiated)) {
         return NEGOTIATE;
     }
     if (CC_UNLIKELY(mBuffer.raw == NULL)) {
         mBuffer.frameCount = count;
-        status_t status = mProvider->getNextBuffer(&mBuffer, readPTS);
+        status_t status = mProvider->getNextBuffer(&mBuffer);
         if (status != OK) {
             return status == NOT_ENOUGH_DATA ? (ssize_t) WOULD_BLOCK : (ssize_t) status;
         }
@@ -81,8 +79,7 @@
     return count;
 }
 
-ssize_t AudioBufferProviderSource::readVia(readVia_t via, size_t total, void *user,
-                                           int64_t readPTS, size_t block)
+ssize_t AudioBufferProviderSource::readVia(readVia_t via, size_t total, void *user, size_t block)
 {
     if (CC_UNLIKELY(!mNegotiated)) {
         return NEGOTIATE;
@@ -102,7 +99,7 @@
         // 1 <= count <= block
         if (CC_UNLIKELY(mBuffer.raw == NULL)) {
             mBuffer.frameCount = count;
-            status_t status = mProvider->getNextBuffer(&mBuffer, readPTS);
+            status_t status = mProvider->getNextBuffer(&mBuffer);
             if (CC_LIKELY(status == OK)) {
                 ALOG_ASSERT(mBuffer.raw != NULL && mBuffer.frameCount <= count);
                 // mConsumed is 0 either from constructor or after releaseBuffer()
@@ -120,8 +117,8 @@
             count = available;
         }
         if (CC_LIKELY(count > 0)) {
-            char* readTgt = (char *) mBuffer.raw + (mConsumed * mFrameSize);
-            ssize_t ret = via(user, readTgt, count, readPTS);
+            ssize_t ret = via(user, (char *) mBuffer.raw + (mConsumed * mFrameSize), count);
+
             if (CC_UNLIKELY(ret <= 0)) {
                 if (CC_LIKELY(accumulator > 0)) {
                     return accumulator;
diff --git a/media/libnbaio/AudioStreamInSource.cpp b/media/libnbaio/AudioStreamInSource.cpp
index 6aab48a..2dc3050 100644
--- a/media/libnbaio/AudioStreamInSource.cpp
+++ b/media/libnbaio/AudioStreamInSource.cpp
@@ -53,7 +53,7 @@
     return NBAIO_Source::negotiate(offers, numOffers, counterOffers, numCounterOffers);
 }
 
-size_t AudioStreamInSource::framesOverrun()
+int64_t AudioStreamInSource::framesOverrun()
 {
     uint32_t framesOverrun = mStream->get_input_frames_lost(mStream);
     if (framesOverrun > 0) {
@@ -64,7 +64,7 @@
     return mFramesOverrun;
 }
 
-ssize_t AudioStreamInSource::read(void *buffer, size_t count, int64_t readPTS __unused)
+ssize_t AudioStreamInSource::read(void *buffer, size_t count)
 {
     if (CC_UNLIKELY(!Format_isValid(mFormat))) {
         return NEGOTIATE;
diff --git a/media/libnbaio/AudioStreamOutSink.cpp b/media/libnbaio/AudioStreamOutSink.cpp
index 0d5f935..ee44678 100644
--- a/media/libnbaio/AudioStreamOutSink.cpp
+++ b/media/libnbaio/AudioStreamOutSink.cpp
@@ -66,30 +66,20 @@
     return ret;
 }
 
-status_t AudioStreamOutSink::getNextWriteTimestamp(int64_t *timestamp) {
-    ALOG_ASSERT(timestamp != NULL);
-
-    if (NULL == mStream)
-        return INVALID_OPERATION;
-
-    if (NULL == mStream->get_next_write_timestamp)
-        return INVALID_OPERATION;
-
-    return mStream->get_next_write_timestamp(mStream, timestamp);
-}
-
-status_t AudioStreamOutSink::getTimestamp(AudioTimestamp& timestamp)
+status_t AudioStreamOutSink::getTimestamp(ExtendedTimestamp &timestamp)
 {
     if (mStream->get_presentation_position == NULL) {
         return INVALID_OPERATION;
     }
-    // FIXME position64 won't be needed after AudioTimestamp.mPosition is changed to uint64_t
+
     uint64_t position64;
-    int ok = mStream->get_presentation_position(mStream, &position64, &timestamp.mTime);
-    if (ok != 0) {
+    struct timespec time;
+    if (mStream->get_presentation_position(mStream, &position64, &time) != OK) {
         return INVALID_OPERATION;
     }
-    timestamp.mPosition = position64;
+    timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position64;
+    timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+            time.tv_sec * 1000000000LL + time.tv_nsec;
     return OK;
 }
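
ExtendedTimestamp keys positions and times by location, and here only LOCATION_KERNEL is filled from the HAL's presentation position. A small hedged helper, not in the patch, for the timespec-to-nanoseconds fold used above:

    #include <cstdint>
    #include <ctime>

    // Fold a struct timespec into the int64_t nanosecond form that
    // ExtendedTimestamp::mTimeNs[] expects.
    static inline int64_t timespecToNs(const struct timespec &ts) {
        return static_cast<int64_t>(ts.tv_sec) * 1000000000LL + ts.tv_nsec;
    }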
 
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index 129e9ef..8d1cb0f 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -19,10 +19,8 @@
 #define LOG_TAG "MonoPipe"
 //#define LOG_NDEBUG 0
 
-#include <common_time/cc_helper.h>
 #include <cutils/atomic.h>
 #include <cutils/compiler.h>
-#include <utils/LinearTransform.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <media/AudioBufferProvider.h>
@@ -32,26 +30,8 @@
 
 namespace android {
 
-static uint64_t cacheN; // output of CCHelper::getLocalFreq()
-static bool cacheValid; // whether cacheN is valid
-static pthread_once_t cacheOnceControl = PTHREAD_ONCE_INIT;
-
-static void cacheOnceInit()
-{
-    CCHelper tmpHelper;
-    status_t res;
-    if (OK != (res = tmpHelper.getLocalFreq(&cacheN))) {
-        ALOGE("Failed to fetch local time frequency when constructing a"
-              " MonoPipe (res = %d).  getNextWriteTimestamp calls will be"
-              " non-functional", res);
-        return;
-    }
-    cacheValid = true;
-}
-
 MonoPipe::MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBlock) :
         NBAIO_Sink(format),
-        mUpdateSeq(0),
         mReqFrames(reqFrames),
         mMaxFrames(roundup(reqFrames)),
         mBuffer(malloc(mMaxFrames * Format_frameSize(format))),
@@ -66,36 +46,6 @@
         mTimestampMutator(&mTimestampShared),
         mTimestampObserver(&mTimestampShared)
 {
-    uint64_t N, D;
-
-    mNextRdPTS = AudioBufferProvider::kInvalidPTS;
-
-    mSamplesToLocalTime.a_zero = 0;
-    mSamplesToLocalTime.b_zero = 0;
-    mSamplesToLocalTime.a_to_b_numer = 0;
-    mSamplesToLocalTime.a_to_b_denom = 0;
-
-    D = Format_sampleRate(format);
-
-    (void) pthread_once(&cacheOnceControl, cacheOnceInit);
-    if (!cacheValid) {
-        // log has already been done
-        return;
-    }
-    N = cacheN;
-
-    LinearTransform::reduce(&N, &D);
-    static const uint64_t kSignedHiBitsMask   = ~(0x7FFFFFFFull);
-    static const uint64_t kUnsignedHiBitsMask = ~(0xFFFFFFFFull);
-    if ((N & kSignedHiBitsMask) || (D & kUnsignedHiBitsMask)) {
-        ALOGE("Cannot reduce sample rate to local clock frequency ratio to fit"
-              " in a 32/32 bit rational.  (max reduction is 0x%016" PRIx64 "/0x%016" PRIx64
-              ").  getNextWriteTimestamp calls will be non-functional", N, D);
-        return;
-    }
-
-    mSamplesToLocalTime.a_to_b_numer = static_cast<int32_t>(N);
-    mSamplesToLocalTime.a_to_b_denom = static_cast<uint32_t>(D);
 }
 
 MonoPipe::~MonoPipe()
@@ -223,104 +173,6 @@
     mSetpoint = setpoint;
 }
 
-status_t MonoPipe::getNextWriteTimestamp(int64_t *timestamp)
-{
-    int32_t front;
-
-    ALOG_ASSERT(NULL != timestamp);
-
-    if (0 == mSamplesToLocalTime.a_to_b_denom)
-        return UNKNOWN_ERROR;
-
-    observeFrontAndNRPTS(&front, timestamp);
-
-    if (AudioBufferProvider::kInvalidPTS != *timestamp) {
-        // If we have a valid read-pointer and next read timestamp pair, then
-        // use the current value of the write pointer to figure out how many
-    // frames are in the buffer, and offset the timestamp by that amount.  Then
-        // next time we write to the MonoPipe, the data will hit the speakers at
-        // the next read timestamp plus the current amount of data in the
-        // MonoPipe.
-        size_t pendingFrames = (mRear - front) & (mMaxFrames - 1);
-        *timestamp = offsetTimestampByAudioFrames(*timestamp, pendingFrames);
-    }
-
-    return OK;
-}
-
-void MonoPipe::updateFrontAndNRPTS(int32_t newFront, int64_t newNextRdPTS)
-{
-    // Set the MSB of the update sequence number to indicate that there is a
-    // multi-variable update in progress.  Use an atomic store with an "acquire"
-    // barrier to make sure that the next operations cannot be re-ordered and
-    // take place before the change to mUpdateSeq is committed.
-    int32_t tmp = mUpdateSeq | 0x80000000;
-    android_atomic_acquire_store(tmp, &mUpdateSeq);
-
-    // Update mFront and mNextRdPTS
-    mFront = newFront;
-    mNextRdPTS = newNextRdPTS;
-
-    // We are finished with the update.  Compute the next sequence number (which
-    // should be the old sequence number, plus one, and with the MSB cleared)
-    // and then store it in mUpdateSeq using an atomic store with a "release"
-    // barrier so our update operations cannot be re-ordered past the update of
-    // the sequence number.
-    tmp = (tmp + 1) & 0x7FFFFFFF;
-    android_atomic_release_store(tmp, &mUpdateSeq);
-}
-
-void MonoPipe::observeFrontAndNRPTS(int32_t *outFront, int64_t *outNextRdPTS)
-{
-    // Perform an atomic observation of mFront and mNextRdPTS.  Basically,
-    // atomically observe the sequence number, then observe the variables, then
-    // atomically observe the sequence number again.  If the two observations of
-    // the sequence number match, and the update-in-progress bit was not set,
-    // then we know we have a successful atomic observation.  Otherwise, we loop
-    // around and try again.
-    //
-    // Note, it is very important that the observer be a lower priority thread
-    // than the updater.  If the updater is lower than the observer, or they are
-    // the same priority and running with SCHED_FIFO (implying that quantum
-    // based preemption is disabled) then we run the risk of deadlock.
-    int32_t seqOne, seqTwo;
-
-    do {
-        seqOne        = android_atomic_acquire_load(&mUpdateSeq);
-        *outFront     = mFront;
-        *outNextRdPTS = mNextRdPTS;
-        seqTwo        = android_atomic_release_load(&mUpdateSeq);
-    } while ((seqOne != seqTwo) || (seqOne & 0x80000000));
-}
-
-int64_t MonoPipe::offsetTimestampByAudioFrames(int64_t ts, size_t audFrames)
-{
-    if (0 == mSamplesToLocalTime.a_to_b_denom)
-        return AudioBufferProvider::kInvalidPTS;
-
-    if (ts == AudioBufferProvider::kInvalidPTS)
-        return AudioBufferProvider::kInvalidPTS;
-
-    int64_t frame_lt_duration;
-    if (!mSamplesToLocalTime.doForwardTransform(audFrames,
-                                                &frame_lt_duration)) {
-        // This should never fail, but if there is a bug which is causing it
-        // to fail, this message would probably end up flooding the logs
-        // because the conversion would probably fail forever.  Log the
-        // error, but then zero out the ratio in the linear transform so
-        // that we don't try to do any conversions from now on.  This
-        // MonoPipe's getNextWriteTimestamp is now broken for good.
-        ALOGE("Overflow when attempting to convert %zu audio frames to"
-              " duration in local time.  getNextWriteTimestamp will fail from"
-              " now on.", audFrames);
-        mSamplesToLocalTime.a_to_b_numer = 0;
-        mSamplesToLocalTime.a_to_b_denom = 0;
-        return AudioBufferProvider::kInvalidPTS;
-    }
-
-    return ts + frame_lt_duration;
-}
-
 void MonoPipe::shutdown(bool newState)
 {
     mIsShutdown = newState;
@@ -331,9 +183,14 @@
     return mIsShutdown;
 }
 
-status_t MonoPipe::getTimestamp(AudioTimestamp& timestamp)
+status_t MonoPipe::getTimestamp(ExtendedTimestamp &timestamp)
 {
-    if (mTimestampObserver.poll(timestamp)) {
+    ExtendedTimestamp ets;
+    if (mTimestampObserver.poll(ets)) {
+        timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+                ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+        timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+                ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
         return OK;
     }
     return INVALID_OPERATION;
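
The deleted updateFrontAndNRPTS()/observeFrontAndNRPTS() pair was a hand-rolled single-writer seqlock: the MSB of the sequence number marks an update in progress, and readers retry until they see a stable, unmarked value. A sketch of the same idiom with C++11 atomics, using sequentially consistent operations for simplicity where the original used explicit acquire/release barriers:

    #include <atomic>
    #include <cstdint>

    // Single-writer seqlock: the writer sets the sign bit while both fields are
    // mid-update; readers spin until they observe a stable, unmarked sequence.
    struct FrontAndPts {
        std::atomic<int32_t> seq{0};
        std::atomic<int32_t> front{0};
        std::atomic<int64_t> nextReadPts{0};

        void update(int32_t f, int64_t p) {        // exactly one writer thread
            int32_t s = seq.load() | 0x80000000;
            seq.store(s);                          // mark multi-word update in progress
            front.store(f);
            nextReadPts.store(p);
            seq.store((s + 1) & 0x7FFFFFFF);       // clear the mark, bump the count
        }

        void observe(int32_t *outFront, int64_t *outPts) const {
            int32_t s1, s2;
            do {
                s1 = seq.load();
                *outFront = front.load();
                *outPts = nextReadPts.load();
                s2 = seq.load();
            } while (s1 != s2 || (s1 & 0x80000000));
        }
    };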
diff --git a/media/libnbaio/MonoPipeReader.cpp b/media/libnbaio/MonoPipeReader.cpp
index e4d3ed8..01dc524 100644
--- a/media/libnbaio/MonoPipeReader.cpp
+++ b/media/libnbaio/MonoPipeReader.cpp
@@ -43,25 +43,11 @@
     return ret;
 }
 
-ssize_t MonoPipeReader::read(void *buffer, size_t count, int64_t readPTS)
+ssize_t MonoPipeReader::read(void *buffer, size_t count)
 {
-    // Compute the "next read PTS" and cache it.  Callers of read pass a read
-    // PTS indicating the local time for which they are requesting data along
-    // with a count (which is the number of audio frames they are going to
-    // ultimately pass to the next stage of the pipeline).  Offsetting readPTS
-    // by the duration of count will give us the readPTS which will be passed to
-    // us next time, assuming the system continues to operate in steady state
-    // with no discontinuities.  We stash this value so it can be used by the
-    // MonoPipe writer to implement getNextWriteTimestamp.
-    int64_t nextReadPTS;
-    nextReadPTS = mPipe->offsetTimestampByAudioFrames(readPTS, count);
-
     // count == 0 is unlikely and not worth checking for explicitly; will be handled automatically
     ssize_t red = availableToRead();
     if (CC_UNLIKELY(red <= 0)) {
-        // Uh-oh, looks like we are underflowing.  Update the next read PTS and
-        // get out.
-        mPipe->updateFrontAndNRPTS(mPipe->mFront, nextReadPTS);
         return red;
     }
     if (CC_LIKELY((size_t) red > count)) {
@@ -80,13 +66,13 @@
                 memcpy((char *) buffer + (part1 * mFrameSize), mPipe->mBuffer, part2 * mFrameSize);
             }
         }
-        mPipe->updateFrontAndNRPTS(red + mPipe->mFront, nextReadPTS);
+        android_atomic_release_store(red + mPipe->mFront, &mPipe->mFront);
         mFramesRead += red;
     }
     return red;
 }
 
-void MonoPipeReader::onTimestamp(const AudioTimestamp& timestamp)
+void MonoPipeReader::onTimestamp(const ExtendedTimestamp &timestamp)
 {
     mPipe->mTimestampMutator.push(timestamp);
 }
diff --git a/media/libnbaio/NBAIO.cpp b/media/libnbaio/NBAIO.cpp
index d641e74..1cb4410 100644
--- a/media/libnbaio/NBAIO.cpp
+++ b/media/libnbaio/NBAIO.cpp
@@ -97,8 +97,7 @@
 }
 
 // This is a default implementation; it is expected that subclasses will optimize this.
-ssize_t NBAIO_Source::readVia(readVia_t via, size_t total, void *user,
-                              int64_t readPTS, size_t block)
+ssize_t NBAIO_Source::readVia(readVia_t via, size_t total, void *user, size_t block)
 {
     if (!mNegotiated) {
         return (ssize_t) NEGOTIATE;
@@ -117,11 +116,11 @@
         if (count > block) {
             count = block;
         }
-        ssize_t ret = read(buffer, count, readPTS);
+        ssize_t ret = read(buffer, count);
         if (ret > 0) {
             ALOG_ASSERT((size_t) ret <= count);
             size_t maxRet = ret;
-            ret = via(user, buffer, maxRet, readPTS);
+            ret = via(user, buffer, maxRet);
             if (ret > 0) {
                 ALOG_ASSERT((size_t) ret <= maxRet);
                 accumulator += ret;
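
readVia() pumps frames through a callback instead of filling a caller-supplied buffer, and with this change the callback no longer receives a PTS. An illustrative callback matching the post-change shape (the real typedef lives in NBAIO.h; FrameTally and tallyVia are made up for the example):

    #include <cstddef>
    #include <sys/types.h>

    // Shape of the (now PTS-free) via callback: consume up to 'count' frames
    // from 'buffer', return frames consumed or a negative error.
    typedef ssize_t (*readVia_t)(void *user, void *buffer, size_t count);

    struct FrameTally {
        size_t frames = 0;
    };

    static ssize_t tallyVia(void *user, void * /*buffer*/, size_t count) {
        static_cast<FrameTally *>(user)->frames += count;
        return static_cast<ssize_t>(count);  // claim everything was consumed
    }

    // A source would then be driven as: source->readVia(tallyVia, total, &tally);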
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index c8e4953..a879647 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -36,7 +36,12 @@
 
 PipeReader::~PipeReader()
 {
-    int32_t readers = android_atomic_dec(&mPipe.mReaders);
+#if !LOG_NDEBUG
+    int32_t readers =
+#else
+    (void)
+#endif
+            android_atomic_dec(&mPipe.mReaders);
     ALOG_ASSERT(readers > 0);
 }
 
@@ -59,7 +64,7 @@
     return avail;
 }
 
-ssize_t PipeReader::read(void *buffer, size_t count, int64_t readPTS __unused)
+ssize_t PipeReader::read(void *buffer, size_t count)
 {
     ssize_t avail = availableToRead();
     if (CC_UNLIKELY(avail <= 0)) {
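
The destructor change above keeps the decremented count only when ALOG_ASSERT is compiled in, so -Werror builds do not trip over an unused variable. The same idiom with the standard assert macro, as a hedged sketch:

    #include <cassert>

    static int decrementRefs(int *refs) {
        return --*refs;
    }

    void release(int *refs) {
    #ifndef NDEBUG
        int remaining =
    #else
        (void)
    #endif
                decrementRefs(refs);
        // assert() compiles away under NDEBUG, and so does its use of 'remaining'.
        assert(remaining >= 0);
    }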
diff --git a/media/libnbaio/SourceAudioBufferProvider.cpp b/media/libnbaio/SourceAudioBufferProvider.cpp
index e21ef48..d58619f 100644
--- a/media/libnbaio/SourceAudioBufferProvider.cpp
+++ b/media/libnbaio/SourceAudioBufferProvider.cpp
@@ -45,7 +45,7 @@
     free(mAllocated);
 }
 
-status_t SourceAudioBufferProvider::getNextBuffer(Buffer *buffer, int64_t pts)
+status_t SourceAudioBufferProvider::getNextBuffer(Buffer *buffer)
 {
     ALOG_ASSERT(buffer != NULL && buffer->frameCount > 0 && mGetCount == 0);
     // any leftover data available?
@@ -61,20 +61,30 @@
     // do we need to reallocate?
     if (buffer->frameCount > mSize) {
         free(mAllocated);
-        mAllocated = malloc(buffer->frameCount * mFrameSize);
+        // Android convention is to _not_ check the return value of malloc and friends.
+        // But in this case the calloc() can also fail due to integer overflow,
+        // so we check and recover.
+        mAllocated = calloc(buffer->frameCount, mFrameSize);
+        if (mAllocated == NULL) {
+            mSize = 0;
+            goto fail;
+        }
         mSize = buffer->frameCount;
     }
-    // read from source
-    ssize_t actual = mSource->read(mAllocated, buffer->frameCount, pts);
-    if (actual > 0) {
-        ALOG_ASSERT((size_t) actual <= buffer->frameCount);
-        mOffset = 0;
-        mRemaining = actual;
-        buffer->raw = mAllocated;
-        buffer->frameCount = actual;
-        mGetCount = actual;
-        return OK;
+    {
+        // read from source
+        ssize_t actual = mSource->read(mAllocated, buffer->frameCount);
+        if (actual > 0) {
+            ALOG_ASSERT((size_t) actual <= buffer->frameCount);
+            mOffset = 0;
+            mRemaining = actual;
+            buffer->raw = mAllocated;
+            buffer->frameCount = actual;
+            mGetCount = actual;
+            return OK;
+        }
     }
+fail:
     buffer->raw = NULL;
     buffer->frameCount = 0;
     mGetCount = 0;
@@ -102,12 +112,12 @@
     return avail < 0 ? 0 : (size_t) avail;
 }
 
-size_t SourceAudioBufferProvider::framesReleased() const
+int64_t SourceAudioBufferProvider::framesReleased() const
 {
     return mFramesReleased;
 }
 
-void SourceAudioBufferProvider::onTimestamp(const AudioTimestamp& timestamp)
+void SourceAudioBufferProvider::onTimestamp(const ExtendedTimestamp &timestamp)
 {
     mSource->onTimestamp(timestamp);
 }
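
Switching from malloc(count * size) to calloc(count, size) closes an integer-overflow hole: calloc is required to fail rather than silently wrap the product. A standalone sketch of the pattern:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // std::calloc() must return NULL when count * size overflows, so the product
    // is never computed with silent wraparound the way malloc(count * size) is.
    void *safeFrameAlloc(size_t frameCount, size_t frameSize) {
        void *p = std::calloc(frameCount, frameSize);
        if (p == NULL) {
            // Either genuinely out of memory, or frameCount * frameSize overflowed.
            std::fprintf(stderr, "allocating %zu x %zu bytes failed\n",
                         frameCount, frameSize);
        }
        return p;
    }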
diff --git a/media/libstagefright/AACExtractor.cpp b/media/libstagefright/AACExtractor.cpp
index 45e8a30..19efc53 100644
--- a/media/libstagefright/AACExtractor.cpp
+++ b/media/libstagefright/AACExtractor.cpp
@@ -211,7 +211,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-sp<MediaSource> AACExtractor::getTrack(size_t index) {
+sp<IMediaSource> AACExtractor::getTrack(size_t index) {
     if (mInitCheck != OK || index != 0) {
         return NULL;
     }
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index 9d90dbd..8b1e1c3 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -67,7 +67,7 @@
 }
 
 
-status_t AACWriter::addSource(const sp<MediaSource> &source) {
+status_t AACWriter::addSource(const sp<IMediaSource> &source) {
     if (mInitCheck != OK) {
         return mInitCheck;
     }
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 6399b79..d97d5b1 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -41,7 +41,6 @@
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
 #include <media/stagefright/PersistentSurface.h>
 #include <media/stagefright/SurfaceUtils.h>
 #include <media/hardware/HardwareAPI.h>
@@ -53,9 +52,15 @@
 #include <OMX_AsString.h>
 
 #include "include/avc_utils.h"
+#include "include/DataConverter.h"
+#include "omx/OMXUtils.h"
 
 namespace android {
 
+enum {
+    kMaxIndicesToCheck = 32, // used when enumerating supported formats and profiles
+};
+
 // OMX errors are directly mapped into status_t range if
 // there is no corresponding MediaError status code.
 // Use the statusFromOMXError(int32_t omxError) function.
@@ -98,15 +103,6 @@
     }
 }
 
-template<class T>
-static void InitOMXParams(T *params) {
-    params->nSize = sizeof(T);
-    params->nVersion.s.nVersionMajor = 1;
-    params->nVersion.s.nVersionMinor = 0;
-    params->nVersion.s.nRevision = 0;
-    params->nVersion.s.nStep = 0;
-}
-
 struct MessageList : public RefBase {
     MessageList() {
     }
@@ -119,6 +115,13 @@
     DISALLOW_EVIL_CONSTRUCTORS(MessageList);
 };
 
+static sp<DataConverter> getCopyConverter() {
+    static pthread_once_t once = PTHREAD_ONCE_INIT; // const-inited
+    static sp<DataConverter> sCopyConverter;        // zero-inited
+    pthread_once(&once, [](){ sCopyConverter = new DataConverter(); });
+    return sCopyConverter;
+}
+
 struct CodecObserver : public BnOMXObserver {
     CodecObserver() {}
 
@@ -493,13 +496,15 @@
 ACodec::ACodec()
     : mQuirks(0),
       mNode(0),
+      mUsingNativeWindow(false),
       mNativeWindowUsageBits(0),
-      mSentFormat(false),
+      mLastNativeWindowDataSpace(HAL_DATASPACE_UNKNOWN),
       mIsVideo(false),
       mIsEncoder(false),
       mFatalError(false),
       mShutdownInProgress(false),
       mExplicitShutdown(false),
+      mIsLegacyVP9Decoder(false),
       mEncoderDelay(0),
       mEncoderPadding(0),
       mRotationDegrees(0),
@@ -510,13 +515,16 @@
       mOutputMetadataType(kMetadataBufferTypeInvalid),
       mLegacyAdaptiveExperiment(false),
       mMetadataBuffersToSubmit(0),
+      mNumUndequeuedBuffers(0),
       mRepeatFrameDelayUs(-1ll),
       mMaxPtsGapUs(-1ll),
       mMaxFps(-1),
       mTimePerFrameUs(-1ll),
       mTimePerCaptureUs(-1ll),
       mCreateInputBuffersSuspended(false),
-      mTunneled(false) {
+      mTunneled(false),
+      mDescribeColorAspectsIndex((OMX_INDEXTYPE)0),
+      mDescribeHDRStaticInfoIndex((OMX_INDEXTYPE)0) {
     mUninitializedState = new UninitializedState(this);
     mLoadedState = new LoadedState(this);
     mLoadedToIdleState = new LoadedToIdleState(this);
@@ -533,6 +541,8 @@
     mPortEOS[kPortIndexInput] = mPortEOS[kPortIndexOutput] = false;
     mInputEOSResult = OK;
 
+    memset(&mLastNativeWindowCrop, 0, sizeof(mLastNativeWindowCrop));
+
     changeState(mUninitializedState);
 }
 
@@ -664,7 +674,10 @@
     }
 
     int usageBits = 0;
-    status_t err = setupNativeWindowSizeFormatAndUsage(nativeWindow, &usageBits);
+    // no need to reconnect as we will not dequeue all buffers
+    status_t err = setupNativeWindowSizeFormatAndUsage(
+            nativeWindow, &usageBits,
+            !storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment /* reconnect */);
     if (err != OK) {
         return err;
     }
@@ -716,7 +729,7 @@
         if (storingMetadataInDecodedBuffers()
                 && !mLegacyAdaptiveExperiment
                 && info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
-            ALOGV("skipping buffer %p", info.mGraphicBuffer->getNativeBuffer());
+            ALOGV("skipping buffer");
             continue;
         }
         ALOGV("attaching buffer %p", info.mGraphicBuffer->getNativeBuffer());
@@ -785,30 +798,58 @@
         if (err == OK) {
             MetadataBufferType type =
                 portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
-            int32_t bufSize = def.nBufferSize;
-            if (type == kMetadataBufferTypeGrallocSource) {
-                bufSize = sizeof(VideoGrallocMetadata);
-            } else if (type == kMetadataBufferTypeANWBuffer) {
+            size_t bufSize = def.nBufferSize;
+            if (type == kMetadataBufferTypeANWBuffer) {
                 bufSize = sizeof(VideoNativeMetadata);
+            } else if (type == kMetadataBufferTypeNativeHandleSource) {
+                bufSize = sizeof(VideoNativeHandleMetadata);
             }
 
             // If using gralloc or native source input metadata buffers, allocate largest
             // metadata size as we prefer to generate native source metadata, but component
             // may require gralloc source. For camera source, allocate at least enough
             // size for native metadata buffers.
-            int32_t allottedSize = bufSize;
-            if (portIndex == kPortIndexInput && type >= kMetadataBufferTypeGrallocSource) {
+            size_t allottedSize = bufSize;
+            if (portIndex == kPortIndexInput && type == kMetadataBufferTypeANWBuffer) {
                 bufSize = max(sizeof(VideoGrallocMetadata), sizeof(VideoNativeMetadata));
             } else if (portIndex == kPortIndexInput && type == kMetadataBufferTypeCameraSource) {
-                bufSize = max(bufSize, (int32_t)sizeof(VideoNativeMetadata));
+                bufSize = max(bufSize, sizeof(VideoNativeMetadata));
             }
 
-            ALOGV("[%s] Allocating %u buffers of size %d/%d (from %u using %s) on %s port",
+            size_t conversionBufferSize = 0;
+
+            sp<DataConverter> converter = mConverter[portIndex];
+            if (converter != NULL) {
+                // here we assume sane conversions of at most 4:1, so the result fits in int32
+                if (portIndex == kPortIndexInput) {
+                    conversionBufferSize = converter->sourceSize(bufSize);
+                } else {
+                    conversionBufferSize = converter->targetSize(bufSize);
+                }
+            }
+
+            size_t alignment = MemoryDealer::getAllocationAlignment();
+
+            ALOGV("[%s] Allocating %u buffers of size %zu/%zu (from %u using %s) on %s port",
                     mComponentName.c_str(),
                     def.nBufferCountActual, bufSize, allottedSize, def.nBufferSize, asString(type),
                     portIndex == kPortIndexInput ? "input" : "output");
 
-            size_t totalSize = def.nBufferCountActual * bufSize;
+            // verify buffer sizes to avoid overflow in align()
+            if (bufSize == 0 || max(bufSize, conversionBufferSize) > kMaxCodecBufferSize) {
+                ALOGE("b/22885421");
+                return NO_MEMORY;
+            }
+
+            // don't modify bufSize as OMX may not expect it to increase after negotiation
+            size_t alignedSize = align(bufSize, alignment);
+            size_t alignedConvSize = align(conversionBufferSize, alignment);
+            if (def.nBufferCountActual > SIZE_MAX / (alignedSize + alignedConvSize)) {
+                ALOGE("b/22885421");
+                return NO_MEMORY;
+            }
+
+            size_t totalSize = def.nBufferCountActual * (alignedSize + alignedConvSize);
             mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
 
             for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
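
The hunk above bounds bufSize by kMaxCodecBufferSize before aligning, then checks the per-buffer sum and the count multiplication before computing totalSize. An illustrative standalone version of those checks; alignUp assumes a power-of-two alignment, as allocators return, and the kMaxCodecBufferSize pre-check is assumed to have been done by the caller:

    #include <cstddef>
    #include <cstdint>

    static inline size_t alignUp(size_t size, size_t alignment) {
        return (size + alignment - 1) & ~(alignment - 1);
    }

    // Returns 0 instead of a wrapped-around total; callers treat 0 as NO_MEMORY.
    static size_t checkedTotalSize(size_t count, size_t bufSize,
                                   size_t convSize, size_t alignment) {
        size_t alignedBuf  = alignUp(bufSize, alignment);
        size_t alignedConv = alignUp(convSize, alignment);
        size_t per = alignedBuf + alignedConv;
        if (per < alignedBuf) return 0;                    // the sum overflowed
        if (per == 0 || count > SIZE_MAX / per) return 0;  // the product would overflow
        return count * per;
    }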
@@ -821,22 +862,36 @@
                 info.mStatus = BufferInfo::OWNED_BY_US;
                 info.mFenceFd = -1;
                 info.mRenderInfo = NULL;
+                info.mNativeHandle = NULL;
 
                 uint32_t requiresAllocateBufferBit =
                     (portIndex == kPortIndexInput)
-                        ? OMXCodec::kRequiresAllocateBufferOnInputPorts
-                        : OMXCodec::kRequiresAllocateBufferOnOutputPorts;
+                        ? kRequiresAllocateBufferOnInputPorts
+                        : kRequiresAllocateBufferOnOutputPorts;
 
-                if ((portIndex == kPortIndexInput && (mFlags & kFlagIsSecure))
-                        || (portIndex == kPortIndexOutput && usingMetadataOnEncoderOutput())) {
+                if (portIndex == kPortIndexInput && (mFlags & kFlagIsSecure)) {
                     mem.clear();
 
-                    void *ptr;
-                    err = mOMX->allocateBuffer(
+                    void *ptr = NULL;
+                    sp<NativeHandle> native_handle;
+                    err = mOMX->allocateSecureBuffer(
                             mNode, portIndex, bufSize, &info.mBufferID,
-                            &ptr);
+                            &ptr, &native_handle);
 
-                    info.mData = new ABuffer(ptr, bufSize);
+                    // TRICKY: this representation is unorthodox, but ACodec requires
+                    // an ABuffer with a proper size to validate range offsets and lengths.
+                    // Since mData is never referenced for secure input, it is used to store
+                    // either the pointer to the secure buffer, or the opaque handle, since on
+                    // some devices ptr is actually an opaque handle, not a pointer.
+
+                    // TRICKY2: use the native handle as the base of the ABuffer if we received one,
+                    // because Widevine source only receives these base addresses.
+                    const native_handle_t *native_handle_ptr =
+                        native_handle == NULL ? NULL : native_handle->handle();
+                    info.mData = new ABuffer(
+                            ptr != NULL ? ptr : (void *)native_handle_ptr, bufSize);
+                    info.mNativeHandle = native_handle;
+                    info.mCodecData = info.mData;
                 } else if (mQuirks & requiresAllocateBufferBit) {
                     err = mOMX->allocateBufferWithBackup(
                             mNode, portIndex, mem, &info.mBufferID, allottedSize);
@@ -845,10 +900,27 @@
                 }
 
                 if (mem != NULL) {
-                    info.mData = new ABuffer(mem->pointer(), bufSize);
+                    info.mCodecData = new ABuffer(mem->pointer(), bufSize);
+                    info.mCodecRef = mem;
+
                     if (type == kMetadataBufferTypeANWBuffer) {
                         ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
                     }
+
+                    // if we require conversion, allocate conversion buffer for client use;
+                    // otherwise, reuse codec buffer
+                    if (mConverter[portIndex] != NULL) {
+                        CHECK_GT(conversionBufferSize, (size_t)0);
+                        mem = mDealer[portIndex]->allocate(conversionBufferSize);
+                        if (mem == NULL || mem->pointer() == NULL) {
+                            return NO_MEMORY;
+                        }
+                        info.mData = new ABuffer(mem->pointer(), conversionBufferSize);
+                        info.mMemRef = mem;
+                    } else {
+                        info.mData = info.mCodecData;
+                        info.mMemRef = info.mCodecRef;
+                    }
                 }
 
                 mBuffers[portIndex].push(info);
@@ -869,8 +941,7 @@
 
     for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
         const BufferInfo &info = mBuffers[portIndex][i];
-
-        desc->addBuffer(info.mBufferID, info.mData);
+        desc->addBuffer(info.mBufferID, info.mData, info.mNativeHandle, info.mMemRef);
     }
 
     notify->setObject("portDesc", desc);
@@ -880,7 +951,8 @@
 }
 
 status_t ACodec::setupNativeWindowSizeFormatAndUsage(
-        ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */) {
+        ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */,
+        bool reconnect) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
     def.nPortIndex = kPortIndexOutput;
@@ -908,6 +980,9 @@
     usage |= kVideoGrallocUsage;
     *finalUsage = usage;
 
+    memset(&mLastNativeWindowCrop, 0, sizeof(mLastNativeWindowCrop));
+    mLastNativeWindowDataSpace = HAL_DATASPACE_UNKNOWN;
+
     ALOGV("gralloc usage: %#x(OMX) => %#x(ACodec)", omxUsage, usage);
     return setNativeWindowSizeFormatAndUsage(
             nativeWindow,
@@ -915,12 +990,14 @@
             def.format.video.nFrameHeight,
             def.format.video.eColorFormat,
             mRotationDegrees,
-            usage);
+            usage,
+            reconnect);
 }
 
 status_t ACodec::configureOutputBuffersFromNativeWindow(
         OMX_U32 *bufferCount, OMX_U32 *bufferSize,
-        OMX_U32 *minUndequeuedBuffers) {
+        OMX_U32 *minUndequeuedBuffers, bool preregister) {
+
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
     def.nPortIndex = kPortIndexOutput;
@@ -929,7 +1006,8 @@
             mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
 
     if (err == OK) {
-        err = setupNativeWindowSizeFormatAndUsage(mNativeWindow.get(), &mNativeWindowUsageBits);
+        err = setupNativeWindowSizeFormatAndUsage(
+                mNativeWindow.get(), &mNativeWindowUsageBits, preregister /* reconnect */);
     }
     if (err != OK) {
         mNativeWindowUsageBits = 0;
@@ -1011,7 +1089,7 @@
 status_t ACodec::allocateOutputBuffersFromNativeWindow() {
     OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
     status_t err = configureOutputBuffersFromNativeWindow(
-            &bufferCount, &bufferSize, &minUndequeuedBuffers);
+            &bufferCount, &bufferSize, &minUndequeuedBuffers, true /* preregister */);
     if (err != 0)
         return err;
     mNumUndequeuedBuffers = minUndequeuedBuffers;
@@ -1042,6 +1120,7 @@
         info.mIsReadFence = false;
         info.mRenderInfo = NULL;
         info.mData = new ABuffer(NULL /* data */, bufferSize /* capacity */);
+        info.mCodecData = info.mData;
         info.mGraphicBuffer = graphicBuffer;
         mBuffers[kPortIndexOutput].push(info);
 
@@ -1096,7 +1175,8 @@
 status_t ACodec::allocateOutputMetadataBuffers() {
     OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
     status_t err = configureOutputBuffersFromNativeWindow(
-            &bufferCount, &bufferSize, &minUndequeuedBuffers);
+            &bufferCount, &bufferSize, &minUndequeuedBuffers,
+            mLegacyAdaptiveExperiment /* preregister */);
     if (err != 0)
         return err;
     mNumUndequeuedBuffers = minUndequeuedBuffers;
@@ -1106,7 +1186,7 @@
 
     size_t bufSize = mOutputMetadataType == kMetadataBufferTypeANWBuffer ?
             sizeof(struct VideoNativeMetadata) : sizeof(struct VideoGrallocMetadata);
-    size_t totalSize = bufferCount * bufSize;
+    size_t totalSize = bufferCount * align(bufSize, MemoryDealer::getAllocationAlignment());
     mDealer[kPortIndexOutput] = new MemoryDealer(totalSize, "ACodec");
 
     // Dequeue buffers and send them to OMX
@@ -1126,11 +1206,13 @@
             ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
         }
         info.mData = new ABuffer(mem->pointer(), mem->size());
+        info.mMemRef = mem;
+        info.mCodecData = info.mData;
+        info.mCodecRef = mem;
 
         // we use useBuffer for metadata regardless of quirks
         err = mOMX->useBuffer(
                 mNode, kPortIndexOutput, mem, &info.mBufferID, mem->size());
-
         mBuffers[kPortIndexOutput].push(info);
 
         ALOGV("[%s] allocated meta buffer with ID %u (pointer = %p)",
@@ -1327,7 +1409,8 @@
         }
 
         bool stale = false;
-        for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
+        for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
+            i--;
             BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
 
             if (info->mGraphicBuffer != NULL &&
@@ -1370,7 +1453,8 @@
 
     // get oldest undequeued buffer
     BufferInfo *oldest = NULL;
-    for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
+    for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
+        i--;
         BufferInfo *info =
             &mBuffers[kPortIndexOutput].editItemAt(i);
         if (info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW &&
@@ -1523,6 +1607,21 @@
 
 status_t ACodec::setComponentRole(
         bool isEncoder, const char *mime) {
+    const char *role = getComponentRole(isEncoder, mime);
+    if (role == NULL) {
+        return BAD_VALUE;
+    }
+    status_t err = setComponentRole(mOMX, mNode, role);
+    if (err != OK) {
+        ALOGW("[%s] Failed to set standard component role '%s'.",
+             mComponentName.c_str(), role);
+    }
+    return err;
+}
+
+//static
+const char *ACodec::getComponentRole(
+        bool isEncoder, const char *mime) {
     struct MimeToRole {
         const char *mime;
         const char *decoderRole;
@@ -1564,6 +1663,8 @@
             "video_decoder.vp9", "video_encoder.vp9" },
         { MEDIA_MIMETYPE_AUDIO_RAW,
             "audio_decoder.raw", "audio_encoder.raw" },
+        { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+            "video_decoder.dolby-vision", "video_encoder.dolby-vision" },
         { MEDIA_MIMETYPE_AUDIO_FLAC,
             "audio_decoder.flac", "audio_encoder.flac" },
         { MEDIA_MIMETYPE_AUDIO_MSGSM,
@@ -1587,35 +1688,27 @@
     }
 
     if (i == kNumMimeToRole) {
-        return ERROR_UNSUPPORTED;
+        return NULL;
     }
 
-    const char *role =
-        isEncoder ? kMimeToRole[i].encoderRole
+    return isEncoder ? kMimeToRole[i].encoderRole
                   : kMimeToRole[i].decoderRole;
+}
 
-    if (role != NULL) {
-        OMX_PARAM_COMPONENTROLETYPE roleParams;
-        InitOMXParams(&roleParams);
+//static
+status_t ACodec::setComponentRole(
+        const sp<IOMX> &omx, IOMX::node_id node, const char *role) {
+    OMX_PARAM_COMPONENTROLETYPE roleParams;
+    InitOMXParams(&roleParams);
 
-        strncpy((char *)roleParams.cRole,
-                role, OMX_MAX_STRINGNAME_SIZE - 1);
+    strncpy((char *)roleParams.cRole,
+            role, OMX_MAX_STRINGNAME_SIZE - 1);
 
-        roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
+    roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
 
-        status_t err = mOMX->setParameter(
-                mNode, OMX_IndexParamStandardComponentRole,
-                &roleParams, sizeof(roleParams));
-
-        if (err != OK) {
-            ALOGW("[%s] Failed to set standard component role '%s'.",
-                 mComponentName.c_str(), role);
-
-            return err;
-        }
-    }
-
-    return OK;
+    return omx->setParameter(
+            node, OMX_IndexParamStandardComponentRole,
+            &roleParams, sizeof(roleParams));
 }
 
 status_t ACodec::configureCodec(
@@ -1625,8 +1718,9 @@
         encoder = false;
     }
 
-    sp<AMessage> inputFormat = new AMessage();
-    sp<AMessage> outputFormat = mNotify->dup(); // will use this for kWhatOutputFormatChanged
+    sp<AMessage> inputFormat = new AMessage;
+    sp<AMessage> outputFormat = new AMessage;
+    mConfigFormat = msg;
 
     mIsEncoder = encoder;
 
@@ -1646,21 +1740,30 @@
         return INVALID_OPERATION;
     }
 
+    // propagate bitrate to the output so that the muxer has it
+    if (encoder && msg->findInt32("bitrate", &bitRate)) {
+        // Technically ISO spec says that 'bitrate' should be 0 for VBR even though it is the
+        // average bitrate. We've been setting both bitrate and max-bitrate to this same value.
+        outputFormat->setInt32("bitrate", bitRate);
+        outputFormat->setInt32("max-bitrate", bitRate);
+    }
+
     int32_t storeMeta;
     if (encoder
-            && msg->findInt32("store-metadata-in-buffers", &storeMeta)
-            && storeMeta != 0) {
-        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexInput, OMX_TRUE, &mInputMetadataType);
+            && msg->findInt32("android._input-metadata-buffer-type", &storeMeta)
+            && storeMeta != kMetadataBufferTypeInvalid) {
+        mInputMetadataType = (MetadataBufferType)storeMeta;
+        err = mOMX->storeMetaDataInBuffers(
+                mNode, kPortIndexInput, OMX_TRUE, &mInputMetadataType);
         if (err != OK) {
             ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
                     mComponentName.c_str(), err);
 
             return err;
-        }
-        // For this specific case we could be using camera source even if storeMetaDataInBuffers
-        // returns Gralloc source. Pretend that we are; this will force us to use nBufferSize.
-        if (mInputMetadataType == kMetadataBufferTypeGrallocSource) {
-            mInputMetadataType = kMetadataBufferTypeCameraSource;
+        } else if (storeMeta == kMetadataBufferTypeANWBuffer
+                && mInputMetadataType == kMetadataBufferTypeGrallocSource) {
+            // IOMX translates ANWBuffers to gralloc source already.
+            mInputMetadataType = (MetadataBufferType)storeMeta;
         }
 
         uint32_t usageBits;
@@ -1706,9 +1809,10 @@
     mIsVideo = video;
     if (encoder && video) {
         OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
-            && msg->findInt32("store-metadata-in-buffers-output", &storeMeta)
+            && msg->findInt32("android._store-metadata-in-buffers-output", &storeMeta)
             && storeMeta != 0);
 
+        mOutputMetadataType = kMetadataBufferTypeNativeHandleSource;
         err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, enable, &mOutputMetadataType);
         if (err != OK) {
             ALOGE("[%s] storeMetaDataInBuffers (output) failed w/ err %d",
@@ -1744,6 +1848,7 @@
     sp<RefBase> obj;
     bool haveNativeWindow = msg->findObject("native-window", &obj)
             && obj != NULL && video && !encoder;
+    mUsingNativeWindow = haveNativeWindow;
     mLegacyAdaptiveExperiment = false;
     if (video && !encoder) {
         inputFormat->setInt32("adaptive-playback", false);
@@ -1757,6 +1862,14 @@
             mFlags |= kFlagIsGrallocUsageProtected;
             mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
         }
+
+        if (mFlags & kFlagIsSecure) {
+            // use native_handles for secure input buffers
+            err = mOMX->enableNativeBuffers(
+                    mNode, kPortIndexInput, OMX_FALSE /* graphic */, OMX_TRUE);
+            ALOGI_IF(err != OK, "falling back to non-native_handles");
+            err = OK; // ignore error for now
+        }
     }
     if (haveNativeWindow) {
         sp<ANativeWindow> nativeWindow =
@@ -1828,6 +1941,7 @@
             }
 
             // Always try to enable dynamic output buffers on native surface
+            mOutputMetadataType = kMetadataBufferTypeANWBuffer;
             err = mOMX->storeMetaDataInBuffers(
                     mNode, kPortIndexOutput, OMX_TRUE, &mOutputMetadataType);
             if (err != OK) {
@@ -1903,6 +2017,10 @@
         }
     }
 
+    AudioEncoding pcmEncoding = kAudioEncodingPcm16bit;
+    (void)msg->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
+    // invalid encodings will default to PCM-16bit in setupRawAudioFormat.
+
     if (video) {
         // determine need for software renderer
         bool usingSwRenderer = false;
@@ -1912,9 +2030,9 @@
         }
 
         if (encoder) {
-            err = setupVideoEncoder(mime, msg);
+            err = setupVideoEncoder(mime, msg, outputFormat, inputFormat);
         } else {
-            err = setupVideoDecoder(mime, msg, haveNativeWindow);
+            err = setupVideoDecoder(mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
         }
 
         if (err != OK) {
@@ -1968,13 +2086,15 @@
                         inputFormat->setInt32("adaptive-playback", false);
                     }
                     if (err == OK) {
-                        err = mOMX->enableGraphicBuffers(mNode, kPortIndexOutput, OMX_FALSE);
+                        err = mOMX->enableNativeBuffers(
+                                mNode, kPortIndexOutput, OMX_TRUE /* graphic */, OMX_FALSE);
                     }
                     if (mFlags & kFlagIsGrallocUsageProtected) {
                         // fallback is not supported for protected playback
                         err = PERMISSION_DENIED;
                     } else if (err == OK) {
-                        err = setupVideoDecoder(mime, msg, false);
+                        err = setupVideoDecoder(
+                                mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
                     }
                 }
             }
@@ -2105,7 +2225,7 @@
                 || !msg->findInt32("sample-rate", &sampleRate)) {
             err = INVALID_OPERATION;
         } else {
-            err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+            err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels, pcmEncoding);
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
         int32_t numChannels;
@@ -2167,7 +2287,10 @@
         err = setOperatingRate(rateFloat, video);
     }
 
+    // NOTE: both mBaseOutputFormat and mOutputFormat are outputFormat to signal first frame.
     mBaseOutputFormat = outputFormat;
+    // trigger a kWhatOutputFormatChanged msg on first buffer
+    mLastOutputFormat.clear();
 
     err = getPortFormat(kPortIndexInput, inputFormat);
     if (err == OK) {
@@ -2177,6 +2300,25 @@
             mOutputFormat = outputFormat;
         }
     }
+
+    // create data converters if needed
+    if (!video && err == OK) {
+        AudioEncoding codecPcmEncoding = kAudioEncodingPcm16bit;
+        if (encoder) {
+            (void)mInputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
+            mConverter[kPortIndexInput] = AudioConverter::Create(pcmEncoding, codecPcmEncoding);
+            if (mConverter[kPortIndexInput] != NULL) {
+                mInputFormat->setInt32("pcm-encoding", pcmEncoding);
+            }
+        } else {
+            (void)mOutputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
+            mConverter[kPortIndexOutput] = AudioConverter::Create(codecPcmEncoding, pcmEncoding);
+            if (mConverter[kPortIndexOutput] != NULL) {
+                mOutputFormat->setInt32("pcm-encoding", pcmEncoding);
+            }
+        }
+    }
+
     return err;
 }
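The converter block above only keeps a DataConverter when the codec-side PCM encoding differs from the requested one; the NULL check suggests AudioConverter::Create returns NULL when no conversion is needed, leaving "pcm-encoding" untouched in that case. A hedged decode-side sketch:

    // Sketch: codec produces 16-bit PCM, app requested float output.
    sp<DataConverter> conv = AudioConverter::Create(
            kAudioEncodingPcm16bit /* source */, kAudioEncodingPcmFloat /* target */);
    if (conv != NULL) {
        // conversion happens on drain; advertise the converted encoding
        outputFormat->setInt32("pcm-encoding", kAudioEncodingPcmFloat);
    }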
 
@@ -2224,6 +2366,102 @@
     return OK;
 }
 
+status_t ACodec::getIntraRefreshPeriod(uint32_t *intraRefreshPeriod) {
+    OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
+    InitOMXParams(&params);
+    params.nPortIndex = kPortIndexOutput;
+    status_t err = mOMX->getConfig(
+            mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, &params, sizeof(params));
+    if (err == OK) {
+        *intraRefreshPeriod = params.nRefreshPeriod;
+        return OK;
+    }
+
+    // Fallback to query through standard OMX index.
+    OMX_VIDEO_PARAM_INTRAREFRESHTYPE refreshParams;
+    InitOMXParams(&refreshParams);
+    refreshParams.nPortIndex = kPortIndexOutput;
+    refreshParams.eRefreshMode = OMX_VIDEO_IntraRefreshCyclic;
+    err = mOMX->getParameter(
+            mNode, OMX_IndexParamVideoIntraRefresh, &refreshParams, sizeof(refreshParams));
+    if (err != OK || refreshParams.nCirMBs == 0) {
+        *intraRefreshPeriod = 0;
+        return OK;
+    }
+
+    // Calculate period based on width and height
+    uint32_t width, height;
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+    OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
+    def.nPortIndex = kPortIndexOutput;
+    err = mOMX->getParameter(
+            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    if (err != OK) {
+        *intraRefreshPeriod = 0;
+        return err;
+    }
+    width = video_def->nFrameWidth;
+    height = video_def->nFrameHeight;
+    // Use H.264/AVC MacroBlock size 16x16
+    *intraRefreshPeriod = divUp((divUp(width, 16u) * divUp(height, 16u)), refreshParams.nCirMBs);
+
+    return OK;
+}
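A worked example of the fallback math above (illustrative numbers), using 16x16 AVC macroblocks:

    // width = 1920, height = 1080
    // MBs per frame = divUp(1920, 16) * divUp(1080, 16) = 120 * 68 = 8160
    // component reports nCirMBs = 510
    // => intraRefreshPeriod = divUp(8160, 510) = 16 frames per full refresh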
+
+status_t ACodec::setIntraRefreshPeriod(uint32_t intraRefreshPeriod, bool inConfigure) {
+    OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
+    InitOMXParams(&params);
+    params.nPortIndex = kPortIndexOutput;
+    params.nRefreshPeriod = intraRefreshPeriod;
+    status_t err = mOMX->setConfig(
+            mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, &params, sizeof(params));
+    if (err == OK) {
+        return OK;
+    }
+
+    // Components only accept setParameter while being configured, so the cyclic fallback
+    // below is limited to configure().
+    if (!inConfigure) {
+        return INVALID_OPERATION;
+    } else {
+        ALOGI("[%s] try falling back to Cyclic", mComponentName.c_str());
+    }
+
+    OMX_VIDEO_PARAM_INTRAREFRESHTYPE refreshParams;
+    InitOMXParams(&refreshParams);
+    refreshParams.nPortIndex = kPortIndexOutput;
+    refreshParams.eRefreshMode = OMX_VIDEO_IntraRefreshCyclic;
+
+    if (intraRefreshPeriod == 0) {
+        // 0 means disable intra refresh.
+        refreshParams.nCirMBs = 0;
+    } else {
+        // Calculate macroblocks that need to be intra coded based on width and height
+        uint32_t width, height;
+        OMX_PARAM_PORTDEFINITIONTYPE def;
+        InitOMXParams(&def);
+        OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
+        def.nPortIndex = kPortIndexOutput;
+        err = mOMX->getParameter(
+                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+        if (err != OK) {
+            return err;
+        }
+        width = video_def->nFrameWidth;
+        height = video_def->nFrameHeight;
+        // Use H.264/AVC MacroBlock size 16x16
+        refreshParams.nCirMBs = divUp((divUp(width, 16u) * divUp(height, 16u)), intraRefreshPeriod);
+    }
+
+    err = mOMX->setParameter(mNode, OMX_IndexParamVideoIntraRefresh,
+                             &refreshParams, sizeof(refreshParams));
+    if (err != OK) {
+        return err;
+    }
+
+    return OK;
+}
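Both directions rely on divUp for ceiling division; the encoder-side conversion is the inverse of the query above (8160 MBs with a 16-frame period gives nCirMBs = divUp(8160, 16) = 510). A minimal sketch of the usual definition, assuming the helper matches the foundation utilities:

    // Ceiling division, e.g. divUp(1080u, 16u) == 68.
    template<class T>
    static inline T divUp(T num, T den) {
        return (num + den - 1) / den;
    }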
+
 status_t ACodec::setMinBufferSize(OMX_U32 portIndex, size_t size) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
@@ -2270,9 +2508,8 @@
     InitOMXParams(&format);
 
     format.nPortIndex = portIndex;
-    for (OMX_U32 index = 0;; ++index) {
+    for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
         format.nIndex = index;
-
         status_t err = mOMX->getParameter(
                 mNode, OMX_IndexParamAudioPortFormat,
                 &format, sizeof(format));
@@ -2284,6 +2521,13 @@
         if (format.eEncoding == desiredFormat) {
             break;
         }
+
+        if (index == kMaxIndicesToCheck) {
+            ALOGW("[%s] stopping checking formats after %u: %s(%x)",
+                    mComponentName.c_str(), index,
+                    asString(format.eEncoding), format.eEncoding);
+            return ERROR_UNSUPPORTED;
+        }
     }
 
     return mOMX->setParameter(
@@ -2625,7 +2869,7 @@
 }
 
 status_t ACodec::setupRawAudioFormat(
-        OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels) {
+        OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels, AudioEncoding encoding) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
     def.nPortIndex = portIndex;
@@ -2658,9 +2902,23 @@
     }
 
     pcmParams.nChannels = numChannels;
-    pcmParams.eNumData = OMX_NumericalDataSigned;
+    switch (encoding) {
+        case kAudioEncodingPcm8bit:
+            pcmParams.eNumData = OMX_NumericalDataUnsigned;
+            pcmParams.nBitPerSample = 8;
+            break;
+        case kAudioEncodingPcmFloat:
+            pcmParams.eNumData = OMX_NumericalDataFloat;
+            pcmParams.nBitPerSample = 32;
+            break;
+        case kAudioEncodingPcm16bit:
+            pcmParams.eNumData = OMX_NumericalDataSigned;
+            pcmParams.nBitPerSample = 16;
+            break;
+        default:
+            return BAD_VALUE;
+    }
     pcmParams.bInterleaved = OMX_TRUE;
-    pcmParams.nBitPerSample = 16;
     pcmParams.nSamplingRate = sampleRate;
     pcmParams.ePCMMode = OMX_AUDIO_PCMModeLinear;
 
@@ -2668,8 +2926,17 @@
         return OMX_ErrorNone;
     }
 
-    return mOMX->setParameter(
+    err = mOMX->setParameter(
             mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+    // if we could not set up raw format to non-16-bit, try with 16-bit
+    // NOTE: we will also verify this via readback, in case codec ignores these fields
+    if (err != OK && encoding != kAudioEncodingPcm16bit) {
+        pcmParams.eNumData = OMX_NumericalDataSigned;
+        pcmParams.nBitPerSample = 16;
+        err = mOMX->setParameter(
+                mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+    }
+    return err;
 }
 
 status_t ACodec::configureTunneledVideoPlayback(
@@ -2704,8 +2971,7 @@
     format.nIndex = 0;
     bool found = false;
 
-    OMX_U32 index = 0;
-    for (;;) {
+    for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
         format.nIndex = index;
         status_t err = mOMX->getParameter(
                 mNode, OMX_IndexParamVideoPortFormat,
@@ -2750,7 +3016,12 @@
             break;
         }
 
-        ++index;
+        if (index == kMaxIndicesToCheck) {
+            ALOGW("[%s] stopping checking formats after %u: %s(%x)/%s(%x)",
+                    mComponentName.c_str(), index,
+                    asString(format.eCompressionFormat), format.eCompressionFormat,
+                    asString(format.eColorFormat), format.eColorFormat);
+        }
     }
 
     if (!found) {
@@ -2840,6 +3111,7 @@
     { MEDIA_MIMETYPE_VIDEO_MPEG2, OMX_VIDEO_CodingMPEG2 },
     { MEDIA_MIMETYPE_VIDEO_VP8, OMX_VIDEO_CodingVP8 },
     { MEDIA_MIMETYPE_VIDEO_VP9, OMX_VIDEO_CodingVP9 },
+    { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, OMX_VIDEO_CodingDolbyVision },
 };
 
 static status_t GetVideoCodingTypeFromMime(
@@ -2875,7 +3147,8 @@
 }
 
 status_t ACodec::setupVideoDecoder(
-        const char *mime, const sp<AMessage> &msg, bool haveNativeWindow) {
+        const char *mime, const sp<AMessage> &msg, bool haveNativeWindow,
+        bool usingSwRenderer, sp<AMessage> &outputFormat) {
     int32_t width, height;
     if (!msg->findInt32("width", &width)
             || !msg->findInt32("height", &height)) {
@@ -2889,6 +3162,20 @@
         return err;
     }
 
+    if (compressionFormat == OMX_VIDEO_CodingVP9) {
+        OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
+        InitOMXParams(&params);
+        params.nPortIndex = kPortIndexInput;
+        // Check if VP9 decoder advertises supported profiles.
+        params.nProfileIndex = 0;
+        status_t err = mOMX->getParameter(
+                mNode,
+                OMX_IndexParamVideoProfileLevelQuerySupported,
+                &params,
+                sizeof(params));
+        mIsLegacyVP9Decoder = err != OK;
+    }
+
     err = setVideoPortFormatType(
             kPortIndexInput, compressionFormat, OMX_COLOR_FormatUnused);
 
@@ -2938,10 +3225,346 @@
         return err;
     }
 
+    err = setColorAspectsForVideoDecoder(
+            width, height, haveNativeWindow | usingSwRenderer, msg, outputFormat);
+    if (err == ERROR_UNSUPPORTED) { // support is optional
+        err = OK;
+    }
+
+    if (err != OK) {
+        return err;
+    }
+
+    err = setHDRStaticInfoForVideoCodec(kPortIndexOutput, msg, outputFormat);
+    if (err == ERROR_UNSUPPORTED) { // support is optional
+        err = OK;
+    }
+    return err;
+}
+
+status_t ACodec::initDescribeColorAspectsIndex() {
+    status_t err = mOMX->getExtensionIndex(
+            mNode, "OMX.google.android.index.describeColorAspects", &mDescribeColorAspectsIndex);
+    if (err != OK) {
+        mDescribeColorAspectsIndex = (OMX_INDEXTYPE)0;
+    }
+    return err;
+}
+
+status_t ACodec::setCodecColorAspects(DescribeColorAspectsParams &params, bool verify) {
+    status_t err = ERROR_UNSUPPORTED;
+    if (mDescribeColorAspectsIndex) {
+        err = mOMX->setConfig(mNode, mDescribeColorAspectsIndex, &params, sizeof(params));
+    }
+    ALOGV("[%s] setting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+            mComponentName.c_str(),
+            params.sAspects.mRange, asString(params.sAspects.mRange),
+            params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+            params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+            params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+            err, asString(err));
+
+    if (verify && err == OK) {
+        err = getCodecColorAspects(params);
+    }
+
+    ALOGW_IF(err == ERROR_UNSUPPORTED && mDescribeColorAspectsIndex,
+            "[%s] setting color aspects failed even though codec advertises support",
+            mComponentName.c_str());
+    return err;
+}
+
+status_t ACodec::setColorAspectsForVideoDecoder(
+        int32_t width, int32_t height, bool usingNativeWindow,
+        const sp<AMessage> &configFormat, sp<AMessage> &outputFormat) {
+    DescribeColorAspectsParams params;
+    InitOMXParams(&params);
+    params.nPortIndex = kPortIndexOutput;
+
+    getColorAspectsFromFormat(configFormat, params.sAspects);
+    if (usingNativeWindow) {
+        setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
+        // The default aspects will be set back to the output format during the
+        // getFormat phase of configure(). Set non-Unspecified values back into the
+        // format, in case the component does not support this enumeration.
+        setColorAspectsIntoFormat(params.sAspects, outputFormat);
+    }
+
+    (void)initDescribeColorAspectsIndex();
+
+    // communicate color aspects to codec
+    return setCodecColorAspects(params);
+}
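setDefaultCodecColorAspectsIfNeeded fills any Unspecified aspects from the video size before they are sent to the codec. Roughly (hedged, per the usual SD/HD conventions rather than anything stated in this patch):

    // Illustrative defaulting by resolution for unspecified primaries/matrix:
    //   720x480 or 720x576 (SD)  -> BT.601
    //   1280x720, 1920x1080 (HD) -> BT.709
    // Aspects the caller did specify are left as-is.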
+
+status_t ACodec::getCodecColorAspects(DescribeColorAspectsParams &params) {
+    status_t err = ERROR_UNSUPPORTED;
+    if (mDescribeColorAspectsIndex) {
+        err = mOMX->getConfig(mNode, mDescribeColorAspectsIndex, &params, sizeof(params));
+    }
+    ALOGV("[%s] got color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+            mComponentName.c_str(),
+            params.sAspects.mRange, asString(params.sAspects.mRange),
+            params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+            params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+            params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+            err, asString(err));
+    if (params.bRequestingDataSpace) {
+        ALOGV("for dataspace %#x", params.nDataSpace);
+    }
+    if (err == ERROR_UNSUPPORTED && mDescribeColorAspectsIndex
+            && !params.bRequestingDataSpace && !params.bDataSpaceChanged) {
+        ALOGW("[%s] getting color aspects failed even though codec advertises support",
+                mComponentName.c_str());
+    }
+    return err;
+}
+
+status_t ACodec::getInputColorAspectsForVideoEncoder(sp<AMessage> &format) {
+    DescribeColorAspectsParams params;
+    InitOMXParams(&params);
+    params.nPortIndex = kPortIndexInput;
+    status_t err = getCodecColorAspects(params);
+    if (err == OK) {
+        // we only set encoder input aspects if codec supports them
+        setColorAspectsIntoFormat(params.sAspects, format, true /* force */);
+    }
+    return err;
+}
+
+status_t ACodec::getDataSpace(
+        DescribeColorAspectsParams &params, android_dataspace *dataSpace /* nonnull */,
+        bool tryCodec) {
+    status_t err = OK;
+    if (tryCodec) {
+        // request dataspace guidance from codec.
+        params.bRequestingDataSpace = OMX_TRUE;
+        err = getCodecColorAspects(params);
+        params.bRequestingDataSpace = OMX_FALSE;
+        if (err == OK && params.nDataSpace != HAL_DATASPACE_UNKNOWN) {
+            *dataSpace = (android_dataspace)params.nDataSpace;
+            return err;
+        } else if (err == ERROR_UNSUPPORTED) {
+            // ignore not-implemented error for dataspace requests
+            err = OK;
+        }
+    }
+
+    // this returns legacy versions if available
+    *dataSpace = getDataSpaceForColorAspects(params.sAspects, true /* mayexpand */);
+    ALOGV("[%s] using color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+          "and dataspace %#x",
+            mComponentName.c_str(),
+            params.sAspects.mRange, asString(params.sAspects.mRange),
+            params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+            params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+            params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+            *dataSpace);
+    return err;
+}
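getDataSpace collapses the negotiated aspects into a single android_dataspace, which is a bitfield of standard, transfer, and range (masked and shifted elsewhere in this patch). A hedged sketch of the composition for limited-range BT.709 video:

    // Sketch: dataspace bitfield composition (constants from system/graphics.h).
    android_dataspace ds = (android_dataspace)(
            HAL_DATASPACE_STANDARD_BT709
            | HAL_DATASPACE_TRANSFER_SMPTE_170M  // typical SDR video transfer
            | HAL_DATASPACE_RANGE_LIMITED);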
+
+
+status_t ACodec::getColorAspectsAndDataSpaceForVideoDecoder(
+        int32_t width, int32_t height, const sp<AMessage> &configFormat, sp<AMessage> &outputFormat,
+        android_dataspace *dataSpace) {
+    DescribeColorAspectsParams params;
+    InitOMXParams(&params);
+    params.nPortIndex = kPortIndexOutput;
+
+    // reset default format and get resulting format
+    getColorAspectsFromFormat(configFormat, params.sAspects);
+    if (dataSpace != NULL) {
+        setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
+    }
+    status_t err = setCodecColorAspects(params, true /* readBack */);
+
+    // we always set specified aspects for decoders
+    setColorAspectsIntoFormat(params.sAspects, outputFormat);
+
+    if (dataSpace != NULL) {
+        status_t res = getDataSpace(params, dataSpace, err == OK /* tryCodec */);
+        if (err == OK) {
+            err = res;
+        }
+    }
+
+    return err;
+}
+
+// initial video encoder setup for bytebuffer mode
+status_t ACodec::setColorAspectsForVideoEncoder(
+        const sp<AMessage> &configFormat, sp<AMessage> &outputFormat, sp<AMessage> &inputFormat) {
+    // copy config to output format as this is not exposed via getFormat
+    copyColorConfig(configFormat, outputFormat);
+
+    DescribeColorAspectsParams params;
+    InitOMXParams(&params);
+    params.nPortIndex = kPortIndexInput;
+    getColorAspectsFromFormat(configFormat, params.sAspects);
+
+    (void)initDescribeColorAspectsIndex();
+
+    int32_t usingRecorder;
+    if (configFormat->findInt32("android._using-recorder", &usingRecorder) && usingRecorder) {
+        android_dataspace dataSpace = HAL_DATASPACE_BT709;
+        int32_t width, height;
+        if (configFormat->findInt32("width", &width)
+                && configFormat->findInt32("height", &height)) {
+            setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
+            status_t err = getDataSpace(
+                    params, &dataSpace, mDescribeColorAspectsIndex /* tryCodec */);
+            if (err != OK) {
+                return err;
+            }
+            setColorAspectsIntoFormat(params.sAspects, outputFormat);
+        }
+        inputFormat->setInt32("android._dataspace", (int32_t)dataSpace);
+    }
+
+    // communicate color aspects to codec, but do not allow change of the platform aspects
+    ColorAspects origAspects = params.sAspects;
+    for (int triesLeft = 2; --triesLeft >= 0; ) {
+        status_t err = setCodecColorAspects(params, true /* readBack */);
+        if (err != OK
+                || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+                        params.sAspects, origAspects, true /* usePlatformAspects */)) {
+            return err;
+        }
+        ALOGW_IF(triesLeft == 0, "[%s] Codec repeatedly changed requested ColorAspects.",
+                mComponentName.c_str());
+    }
     return OK;
 }
 
-status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) {
+status_t ACodec::setHDRStaticInfoForVideoCodec(
+        OMX_U32 portIndex, const sp<AMessage> &configFormat, sp<AMessage> &outputFormat) {
+    CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+
+    DescribeHDRStaticInfoParams params;
+    InitOMXParams(&params);
+    params.nPortIndex = portIndex;
+
+    HDRStaticInfo *info = &params.sInfo;
+    if (getHDRStaticInfoFromFormat(configFormat, info)) {
+        setHDRStaticInfoIntoFormat(params.sInfo, outputFormat);
+    }
+
+    (void)initDescribeHDRStaticInfoIndex();
+
+    // communicate HDR static info to the codec
+    return setHDRStaticInfo(params);
+}
+
+// subsequent initial video encoder setup for surface mode
+status_t ACodec::setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(
+        android_dataspace *dataSpace /* nonnull */) {
+    DescribeColorAspectsParams params;
+    InitOMXParams(&params);
+    params.nPortIndex = kPortIndexInput;
+    ColorAspects &aspects = params.sAspects;
+
+    // reset default format and store resulting format into both input and output formats
+    getColorAspectsFromFormat(mConfigFormat, aspects);
+    int32_t width, height;
+    if (mInputFormat->findInt32("width", &width) && mInputFormat->findInt32("height", &height)) {
+        setDefaultCodecColorAspectsIfNeeded(aspects, width, height);
+    }
+    setColorAspectsIntoFormat(aspects, mInputFormat);
+    setColorAspectsIntoFormat(aspects, mOutputFormat);
+
+    // communicate color aspects to codec, but do not allow any change
+    ColorAspects origAspects = aspects;
+    status_t err = OK;
+    for (int triesLeft = 2; mDescribeColorAspectsIndex && --triesLeft >= 0; ) {
+        err = setCodecColorAspects(params, true /* readBack */);
+        if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(aspects, origAspects)) {
+            break;
+        }
+        ALOGW_IF(triesLeft == 0, "[%s] Codec repeatedly changed requested ColorAspects.",
+                mComponentName.c_str());
+    }
+
+    *dataSpace = HAL_DATASPACE_BT709;
+    aspects = origAspects; // restore desired color aspects
+    status_t res = getDataSpace(
+            params, dataSpace, err == OK && mDescribeColorAspectsIndex /* tryCodec */);
+    if (err == OK) {
+        err = res;
+    }
+    mInputFormat->setInt32("android._dataspace", (int32_t)*dataSpace);
+    mInputFormat->setBuffer(
+            "android._color-aspects", ABuffer::CreateAsCopy(&aspects, sizeof(aspects)));
+
+    // update input format with codec supported color aspects (basically set unsupported
+    // aspects to Unspecified)
+    if (err == OK) {
+        (void)getInputColorAspectsForVideoEncoder(mInputFormat);
+    }
+
+    ALOGV("set default color aspects, updated input format to %s, output format to %s",
+            mInputFormat->debugString(4).c_str(), mOutputFormat->debugString(4).c_str());
+
+    return err;
+}
+
+status_t ACodec::getHDRStaticInfoForVideoCodec(OMX_U32 portIndex, sp<AMessage> &format) {
+    CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+    DescribeHDRStaticInfoParams params;
+    InitOMXParams(&params);
+    params.nPortIndex = portIndex;
+
+    status_t err = getHDRStaticInfo(params);
+    if (err == OK) {
+        // we only set decoder output HDRStaticInfo if the codec supports it
+        setHDRStaticInfoIntoFormat(params.sInfo, format);
+    }
+    return err;
+}
+
+status_t ACodec::initDescribeHDRStaticInfoIndex() {
+    status_t err = mOMX->getExtensionIndex(
+            mNode, "OMX.google.android.index.describeHDRStaticInfo", &mDescribeHDRStaticInfoIndex);
+    if (err != OK) {
+        mDescribeHDRStaticInfoIndex = (OMX_INDEXTYPE)0;
+    }
+    return err;
+}
+
+status_t ACodec::setHDRStaticInfo(const DescribeHDRStaticInfoParams &params) {
+    status_t err = ERROR_UNSUPPORTED;
+    if (mDescribeHDRStaticInfoIndex) {
+        err = mOMX->setConfig(mNode, mDescribeHDRStaticInfoIndex, &params, sizeof(params));
+    }
+
+    const HDRStaticInfo *info = &params.sInfo;
+    ALOGV("[%s] setting  HDRStaticInfo (R: %u %u, G: %u %u, B: %u, %u, W: %u, %u, "
+            "MaxDispL: %u, MinDispL: %u, MaxContentL: %u, MaxFrameAvgL: %u)",
+            mComponentName.c_str(),
+            info->sType1.mR.x, info->sType1.mR.y, info->sType1.mG.x, info->sType1.mG.y,
+            info->sType1.mB.x, info->sType1.mB.y, info->sType1.mW.x, info->sType1.mW.y,
+            info->sType1.mMaxDisplayLuminance, info->sType1.mMinDisplayLuminance,
+            info->sType1.mMaxContentLightLevel, info->sType1.mMaxFrameAverageLightLevel);
+
+    ALOGW_IF(err == ERROR_UNSUPPORTED && mDescribeHDRStaticInfoIndex,
+            "[%s] setting HDRStaticInfo failed even though codec advertises support",
+            mComponentName.c_str());
+    return err;
+}
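The Type1 payload logged above mirrors SMPTE ST 2086 mastering-display metadata plus CTA-861.3 content light levels. Purely illustrative values (units as defined alongside HDRStaticInfo, not taken from this patch):

    // Hypothetical mastering metadata for a 1000-nit HDR10 display:
    //   mR/mG/mB/mW           - display primaries and white point (fixed-point chromaticities)
    //   mMaxDisplayLuminance  = 1000 (nits), mMinDisplayLuminance small but non-zero
    //   mMaxContentLightLevel = 1000, mMaxFrameAverageLightLevel = 400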
+
+status_t ACodec::getHDRStaticInfo(DescribeHDRStaticInfoParams &params) {
+    status_t err = ERROR_UNSUPPORTED;
+    if (mDescribeHDRStaticInfoIndex) {
+        err = mOMX->getConfig(mNode, mDescribeHDRStaticInfoIndex, &params, sizeof(params));
+    }
+
+    ALOGW_IF(err == ERROR_UNSUPPORTED && mDescribeHDRStaticInfoIndex,
+            "[%s] getting HDRStaticInfo failed even though codec advertises support",
+            mComponentName.c_str());
+    return err;
+}
+
+status_t ACodec::setupVideoEncoder(
+        const char *mime, const sp<AMessage> &msg,
+        sp<AMessage> &outputFormat, sp<AMessage> &inputFormat) {
     int32_t tmp;
     if (!msg->findInt32("color-format", &tmp)) {
         return INVALID_OPERATION;
@@ -3075,6 +3698,17 @@
         return err;
     }
 
+    int32_t intraRefreshPeriod = 0;
+    if (msg->findInt32("intra-refresh-period", &intraRefreshPeriod)
+            && intraRefreshPeriod >= 0) {
+        err = setIntraRefreshPeriod((uint32_t)intraRefreshPeriod, true);
+        if (err != OK) {
+            ALOGI("[%s] failed setIntraRefreshPeriod. Failure is fine since this key is optional",
+                    mComponentName.c_str());
+            err = OK;
+        }
+    }
+
     switch (compressionFormat) {
         case OMX_VIDEO_CodingMPEG4:
             err = setupMPEG4EncoderParameters(msg);
@@ -3101,6 +3735,24 @@
             break;
     }
 
+    // Set up color aspects on input, but propagate them to the output format, as they will
+    // not be read back from encoder.
+    err = setColorAspectsForVideoEncoder(msg, outputFormat, inputFormat);
+    if (err == ERROR_UNSUPPORTED) {
+        ALOGI("[%s] cannot encode color aspects. Ignoring.", mComponentName.c_str());
+        err = OK;
+    }
+
+    if (err != OK) {
+        return err;
+    }
+
+    err = setHDRStaticInfoForVideoCodec(kPortIndexInput, msg, outputFormat);
+    if (err == ERROR_UNSUPPORTED) { // support is optional
+        ALOGI("[%s] cannot encode HDR static metadata. Ignoring.", mComponentName.c_str());
+        err = OK;
+    }
+
     if (err == OK) {
         ALOGI("setupVideoEncoder succeeded");
     }
@@ -3431,15 +4083,14 @@
 
         h264type.eProfile = static_cast<OMX_VIDEO_AVCPROFILETYPE>(profile);
         h264type.eLevel = static_cast<OMX_VIDEO_AVCLEVELTYPE>(level);
-    }
-
-    // XXX
-    if (h264type.eProfile != OMX_VIDEO_AVCProfileBaseline) {
-        ALOGW("Use baseline profile instead of %d for AVC recording",
-            h264type.eProfile);
+    } else {
+        // Use baseline profile for AVC recording if profile is not specified.
         h264type.eProfile = OMX_VIDEO_AVCProfileBaseline;
     }
 
+    ALOGI("setupAVCEncoderParameters with [profile: %s] [level: %s]",
+            asString(h264type.eProfile), asString(h264type.eLevel));
+
     if (h264type.eProfile == OMX_VIDEO_AVCProfileBaseline) {
         h264type.nSliceHeaderSpacing = 0;
         h264type.bUseHadamard = OMX_TRUE;
@@ -3457,6 +4108,23 @@
         h264type.bDirect8x8Inference = OMX_FALSE;
         h264type.bDirectSpatialTemporal = OMX_FALSE;
         h264type.nCabacInitIdc = 0;
+    } else if (h264type.eProfile == OMX_VIDEO_AVCProfileMain ||
+            h264type.eProfile == OMX_VIDEO_AVCProfileHigh) {
+        h264type.nSliceHeaderSpacing = 0;
+        h264type.bUseHadamard = OMX_TRUE;
+        h264type.nRefFrames = 2;
+        h264type.nBFrames = 1;
+        h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
+        h264type.nAllowedPictureTypes =
+            OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP | OMX_VIDEO_PictureTypeB;
+        h264type.nRefIdx10ActiveMinus1 = 0;
+        h264type.nRefIdx11ActiveMinus1 = 0;
+        h264type.bEntropyCodingCABAC = OMX_TRUE;
+        h264type.bWeightedPPrediction = OMX_TRUE;
+        h264type.bconstIpred = OMX_TRUE;
+        h264type.bDirect8x8Inference = OMX_TRUE;
+        h264type.bDirectSpatialTemporal = OMX_TRUE;
+        h264type.nCabacInitIdc = 1;
     }
 
     if (h264type.nBFrames != 0) {
@@ -3525,8 +4193,8 @@
         hevcType.eProfile = static_cast<OMX_VIDEO_HEVCPROFILETYPE>(profile);
         hevcType.eLevel = static_cast<OMX_VIDEO_HEVCLEVELTYPE>(level);
     }
-
-    // TODO: Need OMX structure definition for setting iFrameInterval
+    // TODO: finer control?
+    hevcType.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate);
 
     err = mOMX->setParameter(
             mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
@@ -3623,7 +4291,8 @@
     InitOMXParams(&params);
     params.nPortIndex = kPortIndexOutput;
 
-    for (params.nProfileIndex = 0;; ++params.nProfileIndex) {
+    for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
+        params.nProfileIndex = index;
         status_t err = mOMX->getParameter(
                 mNode,
                 OMX_IndexParamVideoProfileLevelQuerySupported,
@@ -3640,7 +4309,14 @@
         if (profile == supportedProfile && level <= supportedLevel) {
             return OK;
         }
+
+        if (index == kMaxIndicesToCheck) {
+            ALOGW("[%s] stopping checking profiles after %u: %x/%x",
+                    mComponentName.c_str(), index,
+                    params.eProfile, params.eLevel);
+        }
     }
+    return ERROR_UNSUPPORTED;
 }
 
 status_t ACodec::configureBitrate(
@@ -3737,10 +4413,10 @@
 
 status_t ACodec::initNativeWindow() {
     if (mNativeWindow != NULL) {
-        return mOMX->enableGraphicBuffers(mNode, kPortIndexOutput, OMX_TRUE);
+        return mOMX->enableNativeBuffers(mNode, kPortIndexOutput, OMX_TRUE /* graphic */, OMX_TRUE);
     }
 
-    mOMX->enableGraphicBuffers(mNode, kPortIndexOutput, OMX_FALSE);
+    mOMX->enableNativeBuffers(mNode, kPortIndexOutput, OMX_TRUE /* graphic */, OMX_FALSE);
     return OK;
 }
 
@@ -3823,11 +4499,11 @@
 }
 
 // static
-bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams &params) {
-    MediaImage &image = params.sMediaImage;
+bool ACodec::describeDefaultColorFormat(DescribeColorFormat2Params &params) {
+    MediaImage2 &image = params.sMediaImage;
     memset(&image, 0, sizeof(image));
 
-    image.mType = MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
+    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
     image.mNumPlanes = 0;
 
     const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
@@ -3839,7 +4515,7 @@
         fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
         fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
         fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
-        fmt != HAL_PIXEL_FORMAT_YV12) {
+        fmt != (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YV12) {
         ALOGW("do not know color format 0x%x = %d", fmt, fmt);
         return false;
     }
@@ -3851,17 +4527,21 @@
         params.nSliceHeight = params.nFrameHeight;
     }
 
-    // we need stride and slice-height to be non-zero
-    if (params.nStride == 0 || params.nSliceHeight == 0) {
+    // we need stride and slice-height to be non-zero and sensible. These values were chosen to
+    // prevent integer overflows further down the line, and do not indicate support for
+    // 32kx32k video.
+    if (params.nStride == 0 || params.nSliceHeight == 0
+            || params.nStride > 32768 || params.nSliceHeight > 32768) {
         ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
                 fmt, fmt, params.nStride, params.nSliceHeight);
         return false;
     }
 
     // set-up YUV format
-    image.mType = MediaImage::MEDIA_IMAGE_TYPE_YUV;
+    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
     image.mNumPlanes = 3;
     image.mBitDepth = 8;
+    image.mBitDepthAllocated = 8;
     image.mPlane[image.Y].mOffset = 0;
     image.mPlane[image.Y].mColInc = 1;
     image.mPlane[image.Y].mRowInc = params.nStride;
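The 32768 caps above are an overflow guard rather than a capability claim; a quick bound check:

    // stride, sliceHeight <= 32768  =>  Y plane <= 32768 * 32768 = 2^30 bytes
    // 4:2:0 total ~ 1.5 * 2^30 ≈ 1.61e9 < 2^31 - 1
    // so the int32 plane offsets/increments computed here cannot overflow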
@@ -3934,26 +4614,34 @@
 // static
 bool ACodec::describeColorFormat(
         const sp<IOMX> &omx, IOMX::node_id node,
-        DescribeColorFormatParams &describeParams)
+        DescribeColorFormat2Params &describeParams)
 {
     OMX_INDEXTYPE describeColorFormatIndex;
     if (omx->getExtensionIndex(
             node, "OMX.google.android.index.describeColorFormat",
-            &describeColorFormatIndex) != OK ||
-        omx->getParameter(
-            node, describeColorFormatIndex,
-            &describeParams, sizeof(describeParams)) != OK) {
-        return describeDefaultColorFormat(describeParams);
+            &describeColorFormatIndex) == OK) {
+        DescribeColorFormatParams describeParamsV1(describeParams);
+        if (omx->getParameter(
+                node, describeColorFormatIndex,
+                &describeParamsV1, sizeof(describeParamsV1)) == OK) {
+            describeParams.initFromV1(describeParamsV1);
+            return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+        }
+    } else if (omx->getExtensionIndex(
+            node, "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
+               && omx->getParameter(
+            node, describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
+        return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
     }
-    return describeParams.sMediaImage.mType !=
-            MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
+
+    return describeDefaultColorFormat(describeParams);
 }
 
 // static
 bool ACodec::isFlexibleColorFormat(
          const sp<IOMX> &omx, IOMX::node_id node,
          uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
-    DescribeColorFormatParams describeParams;
+    DescribeColorFormat2Params describeParams;
     InitOMXParams(&describeParams);
     describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
     // reasonable dummy values
@@ -3969,11 +4657,11 @@
         return false;
     }
 
-    const MediaImage &img = describeParams.sMediaImage;
-    if (img.mType == MediaImage::MEDIA_IMAGE_TYPE_YUV) {
-        if (img.mNumPlanes != 3 ||
-            img.mPlane[img.Y].mHorizSubsampling != 1 ||
-            img.mPlane[img.Y].mVertSubsampling != 1) {
+    const MediaImage2 &img = describeParams.sMediaImage;
+    if (img.mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
+        if (img.mNumPlanes != 3
+                || img.mPlane[img.Y].mHorizSubsampling != 1
+                || img.mPlane[img.Y].mVertSubsampling != 1) {
             return false;
         }
 
@@ -4023,7 +4711,7 @@
                     notify->setInt32("color-format", videoDef->eColorFormat);
 
                     if (mNativeWindow == NULL) {
-                        DescribeColorFormatParams describeParams;
+                        DescribeColorFormat2Params describeParams;
                         InitOMXParams(&describeParams);
                         describeParams.eColorFormat = videoDef->eColorFormat;
                         describeParams.nFrameWidth = videoDef->nFrameWidth;
@@ -4039,54 +4727,72 @@
                                             &describeParams.sMediaImage,
                                             sizeof(describeParams.sMediaImage)));
 
-                            MediaImage *img = &describeParams.sMediaImage;
-                            ALOGV("[%s] MediaImage { F(%ux%u) @%u+%u+%u @%u+%u+%u @%u+%u+%u }",
-                                    mComponentName.c_str(), img->mWidth, img->mHeight,
-                                    img->mPlane[0].mOffset, img->mPlane[0].mColInc, img->mPlane[0].mRowInc,
-                                    img->mPlane[1].mOffset, img->mPlane[1].mColInc, img->mPlane[1].mRowInc,
-                                    img->mPlane[2].mOffset, img->mPlane[2].mColInc, img->mPlane[2].mRowInc);
+                            MediaImage2 &img = describeParams.sMediaImage;
+                            MediaImage2::PlaneInfo *plane = img.mPlane;
+                            ALOGV("[%s] MediaImage { F(%ux%u) @%u+%d+%d @%u+%d+%d @%u+%d+%d }",
+                                    mComponentName.c_str(), img.mWidth, img.mHeight,
+                                    plane[0].mOffset, plane[0].mColInc, plane[0].mRowInc,
+                                    plane[1].mOffset, plane[1].mColInc, plane[1].mRowInc,
+                                    plane[2].mOffset, plane[2].mColInc, plane[2].mRowInc);
                         }
                     }
 
-                    if (portIndex != kPortIndexOutput) {
-                        // TODO: also get input crop
-                        break;
+                    int32_t width = (int32_t)videoDef->nFrameWidth;
+                    int32_t height = (int32_t)videoDef->nFrameHeight;
+
+                    if (portIndex == kPortIndexOutput) {
+                        OMX_CONFIG_RECTTYPE rect;
+                        InitOMXParams(&rect);
+                        rect.nPortIndex = portIndex;
+
+                        if (mOMX->getConfig(
+                                    mNode,
+                                    (portIndex == kPortIndexOutput ?
+                                            OMX_IndexConfigCommonOutputCrop :
+                                            OMX_IndexConfigCommonInputCrop),
+                                    &rect, sizeof(rect)) != OK) {
+                            rect.nLeft = 0;
+                            rect.nTop = 0;
+                            rect.nWidth = videoDef->nFrameWidth;
+                            rect.nHeight = videoDef->nFrameHeight;
+                        }
+
+                        if (rect.nLeft < 0 ||
+                            rect.nTop < 0 ||
+                            rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
+                            rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
+                            ALOGE("Wrong cropped rect (%d, %d) - (%u, %u) vs. frame (%u, %u)",
+                                    rect.nLeft, rect.nTop,
+                                    rect.nLeft + rect.nWidth, rect.nTop + rect.nHeight,
+                                    videoDef->nFrameWidth, videoDef->nFrameHeight);
+                            return BAD_VALUE;
+                        }
+
+                        notify->setRect(
+                                "crop",
+                                rect.nLeft,
+                                rect.nTop,
+                                rect.nLeft + rect.nWidth - 1,
+                                rect.nTop + rect.nHeight - 1);
+
+                        width = rect.nWidth;
+                        height = rect.nHeight;
+
+                        android_dataspace dataSpace = HAL_DATASPACE_UNKNOWN;
+                        (void)getColorAspectsAndDataSpaceForVideoDecoder(
+                                width, height, mConfigFormat, notify,
+                                mUsingNativeWindow ? &dataSpace : NULL);
+                        if (mUsingNativeWindow) {
+                            notify->setInt32("android._dataspace", dataSpace);
+                        }
+                        (void)getHDRStaticInfoForVideoCodec(kPortIndexOutput, notify);
+                    } else {
+                        (void)getInputColorAspectsForVideoEncoder(notify);
+                        if (mConfigFormat->contains("hdr-static-info")) {
+                            (void)getHDRStaticInfoForVideoCodec(kPortIndexInput, notify);
+                        }
                     }
 
-                    OMX_CONFIG_RECTTYPE rect;
-                    InitOMXParams(&rect);
-                    rect.nPortIndex = portIndex;
-
-                    if (mOMX->getConfig(
-                                mNode,
-                                (portIndex == kPortIndexOutput ?
-                                        OMX_IndexConfigCommonOutputCrop :
-                                        OMX_IndexConfigCommonInputCrop),
-                                &rect, sizeof(rect)) != OK) {
-                        rect.nLeft = 0;
-                        rect.nTop = 0;
-                        rect.nWidth = videoDef->nFrameWidth;
-                        rect.nHeight = videoDef->nFrameHeight;
-                    }
-
-                    if (rect.nLeft < 0 ||
-                        rect.nTop < 0 ||
-                        rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
-                        rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
-                        ALOGE("Wrong cropped rect (%d, %d) - (%u, %u) vs. frame (%u, %u)",
-                                rect.nLeft, rect.nTop,
-                                rect.nLeft + rect.nWidth, rect.nTop + rect.nHeight,
-                                videoDef->nFrameWidth, videoDef->nFrameHeight);
-                        return BAD_VALUE;
-                    }
-
-                    notify->setRect(
-                            "crop",
-                            rect.nLeft,
-                            rect.nTop,
-                            rect.nLeft + rect.nWidth - 1,
-                            rect.nTop + rect.nHeight - 1);
-
                     break;
                 }
 
@@ -4149,6 +4855,11 @@
                     } else {
                         notify->setString("mime", mime.c_str());
                     }
+                    uint32_t intraRefreshPeriod = 0;
+                    if (mIsEncoder && getIntraRefreshPeriod(&intraRefreshPeriod) == OK
+                            && intraRefreshPeriod > 0) {
+                        notify->setInt32("intra-refresh-period", intraRefreshPeriod);
+                    }
                     break;
                 }
             }
@@ -4180,15 +4891,11 @@
 
                     if (params.nChannels <= 0
                             || (params.nChannels != 1 && !params.bInterleaved)
-                            || params.nBitPerSample != 16u
-                            || params.eNumData != OMX_NumericalDataSigned
                             || params.ePCMMode != OMX_AUDIO_PCMModeLinear) {
-                        ALOGE("unsupported PCM port: %u channels%s, %u-bit, %s(%d), %s(%d) mode ",
+                        ALOGE("unsupported PCM port: %u channels%s, %u-bit",
                                 params.nChannels,
                                 params.bInterleaved ? " interleaved" : "",
-                                params.nBitPerSample,
-                                asString(params.eNumData), params.eNumData,
-                                asString(params.ePCMMode), params.ePCMMode);
+                                params.nBitPerSample);
                         return FAILED_TRANSACTION;
                     }
 
@@ -4196,6 +4903,22 @@
                     notify->setInt32("channel-count", params.nChannels);
                     notify->setInt32("sample-rate", params.nSamplingRate);
 
+                    AudioEncoding encoding = kAudioEncodingPcm16bit;
+                    if (params.eNumData == OMX_NumericalDataUnsigned
+                            && params.nBitPerSample == 8u) {
+                        encoding = kAudioEncodingPcm8bit;
+                    } else if (params.eNumData == OMX_NumericalDataFloat
+                            && params.nBitPerSample == 32u) {
+                        encoding = kAudioEncodingPcmFloat;
+                    } else if (params.nBitPerSample != 16u
+                            || params.eNumData != OMX_NumericalDataSigned) {
+                        ALOGE("unsupported PCM port: %s(%d), %s(%d) mode ",
+                                asString(params.eNumData), params.eNumData,
+                                asString(params.ePCMMode), params.ePCMMode);
+                        return FAILED_TRANSACTION;
+                    }
+                    notify->setInt32("pcm-encoding", encoding);
+
                     if (mChannelMaskPresent) {
                         notify->setInt32("channel-mask", mChannelMask);
                     }
@@ -4377,6 +5100,7 @@
                     notify->setString("mime", mime);
                     notify->setInt32("channel-count", params.nChannels);
                     notify->setInt32("sample-rate", params.nSamplingRate);
+                    notify->setInt32("pcm-encoding", kAudioEncodingPcm16bit);
                     break;
                 }
 
@@ -4414,44 +5138,123 @@
     return OK;
 }
 
-void ACodec::sendFormatChange(const sp<AMessage> &reply) {
-    sp<AMessage> notify = mBaseOutputFormat->dup();
-    notify->setInt32("what", kWhatOutputFormatChanged);
+void ACodec::onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects) {
+    // aspects are normally communicated in ColorAspects
+    int32_t range, standard, transfer;
+    convertCodecColorAspectsToPlatformAspects(aspects, &range, &standard, &transfer);
 
-    if (getPortFormat(kPortIndexOutput, notify) != OK) {
+    // if some aspects are unspecified, fill them in from the dataspace bitfields
+    if (range == 0) {
+        range = (dataSpace & HAL_DATASPACE_RANGE_MASK) >> HAL_DATASPACE_RANGE_SHIFT;
+    }
+    if (standard == 0) {
+        standard = (dataSpace & HAL_DATASPACE_STANDARD_MASK) >> HAL_DATASPACE_STANDARD_SHIFT;
+    }
+    if (transfer == 0) {
+        transfer = (dataSpace & HAL_DATASPACE_TRANSFER_MASK) >> HAL_DATASPACE_TRANSFER_SHIFT;
+    }
+
+    mOutputFormat = mOutputFormat->dup(); // trigger an output format changed event
+    if (range != 0) {
+        mOutputFormat->setInt32("color-range", range);
+    }
+    if (standard != 0) {
+        mOutputFormat->setInt32("color-standard", standard);
+    }
+    if (transfer != 0) {
+        mOutputFormat->setInt32("color-transfer", transfer);
+    }
+
+    ALOGD("dataspace changed to %#x (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+          "(R:%d(%s), S:%d(%s), T:%d(%s))",
+            dataSpace,
+            aspects.mRange, asString(aspects.mRange),
+            aspects.mPrimaries, asString(aspects.mPrimaries),
+            aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+            aspects.mTransfer, asString(aspects.mTransfer),
+            range, asString((ColorRange)range),
+            standard, asString((ColorStandard)standard),
+            transfer, asString((ColorTransfer)transfer));
+}
+
+void ACodec::onOutputFormatChanged(sp<const AMessage> expectedFormat) {
+    // store new output format, at the same time mark that this is no longer the first frame
+    mOutputFormat = mBaseOutputFormat->dup();
+
+    if (getPortFormat(kPortIndexOutput, mOutputFormat) != OK) {
         ALOGE("[%s] Failed to get port format to send format change", mComponentName.c_str());
         return;
     }
 
-    AString mime;
-    CHECK(notify->findString("mime", &mime));
+    if (expectedFormat != NULL) {
+        sp<const AMessage> changes = expectedFormat->changesFrom(mOutputFormat);
+        sp<const AMessage> to = mOutputFormat->changesFrom(expectedFormat);
+        if (changes->countEntries() != 0 || to->countEntries() != 0) {
+            ALOGW("[%s] BAD CODEC: Output format changed unexpectedly from (diff) %s to (diff) %s",
+                    mComponentName.c_str(),
+                    changes->debugString(4).c_str(), to->debugString(4).c_str());
+        }
+    }
 
-    int32_t left, top, right, bottom;
-    if (mime == MEDIA_MIMETYPE_VIDEO_RAW &&
-        mNativeWindow != NULL &&
-        notify->findRect("crop", &left, &top, &right, &bottom)) {
-        // notify renderer of the crop change
+    if (!mIsVideo && !mIsEncoder) {
+        AudioEncoding pcmEncoding = kAudioEncodingPcm16bit;
+        (void)mConfigFormat->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
+        AudioEncoding codecPcmEncoding = kAudioEncodingPcm16bit;
+        (void)mOutputFormat->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
+
+        mConverter[kPortIndexOutput] = AudioConverter::Create(codecPcmEncoding, pcmEncoding);
+        if (mConverter[kPortIndexOutput] != NULL) {
+            mOutputFormat->setInt32("pcm-encoding", pcmEncoding);
+        }
+    }
+
+    if (mTunneled) {
+        sendFormatChange();
+    }
+}
+
+void ACodec::addKeyFormatChangesToRenderBufferNotification(sp<AMessage> &notify) {
+    AString mime;
+    CHECK(mOutputFormat->findString("mime", &mime));
+
+    if (mime == MEDIA_MIMETYPE_VIDEO_RAW && mNativeWindow != NULL) {
+        // notify renderer of the crop change and dataspace change
         // NOTE: native window uses extended right-bottom coordinate
-        reply->setRect("crop", left, top, right + 1, bottom + 1);
-    } else if (mime == MEDIA_MIMETYPE_AUDIO_RAW &&
-               (mEncoderDelay || mEncoderPadding)) {
+        int32_t left, top, right, bottom;
+        if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
+            notify->setRect("crop", left, top, right + 1, bottom + 1);
+        }
+
+        int32_t dataSpace;
+        if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
+            notify->setInt32("dataspace", dataSpace);
+        }
+    }
+}
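The +1s translate between the two crop conventions: the output format stores an inclusive right/bottom edge, while the native window expects an exclusive (extended) one. For example:

    // full-frame crop of a 1920x1080 surface
    // format:        "crop" = (0, 0, 1919, 1079)   // inclusive right-bottom
    // render notify: "crop" = (0, 0, 1920, 1080)   // extended right-bottom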
+
+void ACodec::sendFormatChange() {
+    AString mime;
+    CHECK(mOutputFormat->findString("mime", &mime));
+
+    if (mime == MEDIA_MIMETYPE_AUDIO_RAW && (mEncoderDelay || mEncoderPadding)) {
         int32_t channelCount;
-        CHECK(notify->findInt32("channel-count", &channelCount));
-        size_t frameSize = channelCount * sizeof(int16_t);
+        CHECK(mOutputFormat->findInt32("channel-count", &channelCount));
         if (mSkipCutBuffer != NULL) {
             size_t prevbufsize = mSkipCutBuffer->size();
             if (prevbufsize != 0) {
                 ALOGW("Replacing SkipCutBuffer holding %zu bytes", prevbufsize);
             }
         }
-        mSkipCutBuffer = new SkipCutBuffer(
-                mEncoderDelay * frameSize,
-                mEncoderPadding * frameSize);
+        mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay, mEncoderPadding, channelCount);
     }
 
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatOutputFormatChanged);
+    notify->setMessage("format", mOutputFormat);
     notify->post();
 
-    mSentFormat = true;
+    // mLastOutputFormat is not used when tunneled; doing this just to stay consistent
+    mLastOutputFormat = mOutputFormat;
 }
 
 void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) {
@@ -4499,9 +5302,12 @@
 }
 
 void ACodec::PortDescription::addBuffer(
-        IOMX::buffer_id id, const sp<ABuffer> &buffer) {
+        IOMX::buffer_id id, const sp<ABuffer> &buffer,
+        const sp<NativeHandle> &handle, const sp<RefBase> &memRef) {
     mBufferIDs.push_back(id);
     mBuffers.push_back(buffer);
+    mHandles.push_back(handle);
+    mMemRefs.push_back(memRef);
 }
 
 size_t ACodec::PortDescription::countBuffers() {
@@ -4516,6 +5322,14 @@
     return mBuffers.itemAt(index);
 }
 
+sp<NativeHandle> ACodec::PortDescription::handleAt(size_t index) const {
+    return mHandles.itemAt(index);
+}
+
+sp<RefBase> ACodec::PortDescription::memRefAt(size_t index) const {
+    return mMemRefs.itemAt(index);
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 ACodec::BaseState::BaseState(ACodec *codec, const sp<AState> &parentState)
@@ -4741,6 +5555,17 @@
 
 bool ACodec::BaseState::onOMXEvent(
         OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
+    if (event == OMX_EventDataSpaceChanged) {
+        ColorAspects aspects;
+        aspects.mRange = (ColorAspects::Range)((data2 >> 24) & 0xFF);
+        aspects.mPrimaries = (ColorAspects::Primaries)((data2 >> 16) & 0xFF);
+        aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((data2 >> 8) & 0xFF);
+        aspects.mTransfer = (ColorAspects::Transfer)(data2 & 0xFF);
+
+        mCodec->onDataSpaceChanged((android_dataspace)data1, aspects);
+        return true;
+    }
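
OMX_EventDataSpaceChanged carries the new dataspace in data1 and packs the four color-aspect fields into data2, one byte each, range in the top byte down to transfer in the low byte. A round-trip sketch of that layout, with plain integers standing in for the ColorAspects enums:

    #include <cstdint>

    // data2 layout: [range][primaries][matrixCoeffs][transfer], 8 bits each.
    uint32_t packAspects(uint8_t range, uint8_t primaries,
            uint8_t matrix, uint8_t transfer) {
        return ((uint32_t)range << 24) | ((uint32_t)primaries << 16)
                | ((uint32_t)matrix << 8) | transfer;
    }

    void unpackAspects(uint32_t data2, uint8_t *range, uint8_t *primaries,
            uint8_t *matrix, uint8_t *transfer) {
        *range     = (data2 >> 24) & 0xFF;
        *primaries = (data2 >> 16) & 0xFF;
        *matrix    = (data2 >> 8) & 0xFF;
        *transfer  = data2 & 0xFF;
    }
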
+
     if (event != OMX_EventError) {
         ALOGV("[%s] EVENT(%d, 0x%08x, 0x%08x)",
              mCodec->mComponentName.c_str(), event, data1, data2);
@@ -4901,29 +5726,38 @@
 
                 OMX_U32 flags = OMX_BUFFERFLAG_ENDOFFRAME;
 
-                int32_t isCSD;
+                MetadataBufferType metaType = mCodec->mInputMetadataType;
+                int32_t isCSD = 0;
                 if (buffer->meta()->findInt32("csd", &isCSD) && isCSD != 0) {
+                    if (mCodec->mIsLegacyVP9Decoder) {
+                        ALOGV("[%s] is legacy VP9 decoder. Ignore %u codec specific data",
+                            mCodec->mComponentName.c_str(), bufferID);
+                        postFillThisBuffer(info);
+                        break;
+                    }
                     flags |= OMX_BUFFERFLAG_CODECCONFIG;
+                    metaType = kMetadataBufferTypeInvalid;
                 }
 
                 if (eos) {
                     flags |= OMX_BUFFERFLAG_EOS;
                 }
 
-                if (buffer != info->mData) {
+                if (buffer != info->mCodecData) {
                     ALOGV("[%s] Needs to copy input data for buffer %u. (%p != %p)",
                          mCodec->mComponentName.c_str(),
                          bufferID,
-                         buffer.get(), info->mData.get());
+                         buffer.get(), info->mCodecData.get());
 
-                    if (buffer->size() > info->mData->capacity()) {
-                        ALOGE("data size (%zu) is greated than buffer capacity (%zu)",
-                                buffer->size(),           // this is the data received
-                                info->mData->capacity()); // this is out buffer size
-                        mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+                    sp<DataConverter> converter = mCodec->mConverter[kPortIndexInput];
+                    if (converter == NULL || isCSD) {
+                        converter = getCopyConverter();
+                    }
+                    status_t err = converter->convert(buffer, info->mCodecData);
+                    if (err != OK) {
+                        mCodec->signalError(OMX_ErrorUndefined, err);
                         return;
                     }
-                    memcpy(info->mData->data(), buffer->data(), buffer->size());
                 }
 
                 if (flags & OMX_BUFFERFLAG_CODECCONFIG) {
@@ -4962,14 +5796,50 @@
                     }
                 }
                 info->checkReadFence("onInputBufferFilled");
-                status_t err2 = mCodec->mOMX->emptyBuffer(
-                    mCodec->mNode,
-                    bufferID,
-                    0,
-                    buffer->size(),
-                    flags,
-                    timeUs,
-                    info->mFenceFd);
+
+                status_t err2 = OK;
+                switch (metaType) {
+                case kMetadataBufferTypeInvalid:
+                    break;
+#ifndef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+                case kMetadataBufferTypeNativeHandleSource:
+                    if (info->mCodecData->size() >= sizeof(VideoNativeHandleMetadata)) {
+                        VideoNativeHandleMetadata *vnhmd =
+                            (VideoNativeHandleMetadata*)info->mCodecData->base();
+                        err2 = mCodec->mOMX->updateNativeHandleInMeta(
+                                mCodec->mNode, kPortIndexInput,
+                                NativeHandle::create(vnhmd->pHandle, false /* ownsHandle */),
+                                bufferID);
+                    }
+                    break;
+                case kMetadataBufferTypeANWBuffer:
+                    if (info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
+                        VideoNativeMetadata *vnmd = (VideoNativeMetadata*)info->mCodecData->base();
+                        err2 = mCodec->mOMX->updateGraphicBufferInMeta(
+                                mCodec->mNode, kPortIndexInput,
+                                new GraphicBuffer(vnmd->pBuffer, false /* keepOwnership */),
+                                bufferID);
+                    }
+                    break;
+#endif
+                default:
+                    ALOGW("Can't marshall %s data in %zu sized buffers in %zu-bit mode",
+                            asString(metaType), info->mCodecData->size(),
+                            sizeof(buffer_handle_t) * 8);
+                    err2 = ERROR_UNSUPPORTED;
+                    break;
+                }
+
+                if (err2 == OK) {
+                    err2 = mCodec->mOMX->emptyBuffer(
+                        mCodec->mNode,
+                        bufferID,
+                        0,
+                        info->mCodecData->size(),
+                        flags,
+                        timeUs,
+                        info->mFenceFd);
+                }
                 info->mFenceFd = -1;
                 if (err2 != OK) {
                     mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err2));
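
In metadata mode the buffer sent to the component does not hold pixels; it holds a small struct that names them (a native handle or an ANativeWindowBuffer), which is why the code above first rewrites that struct via updateNativeHandleInMeta / updateGraphicBufferInMeta and only then calls emptyBuffer. A sketch of the struct-in-a-buffer pattern, with a simplified stand-in for VideoNativeMetadata:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Simplified stand-in for VideoNativeMetadata: a type tag plus a
    // pointer naming the real frame, instead of the pixel data itself.
    struct NativeMeta {
        uint32_t eType;    // e.g. kMetadataBufferTypeANWBuffer
        void    *pBuffer;  // the buffer being referenced
        int      nFenceFd;
    };

    // The size checks in the switch above guard exactly this kind of cast:
    // the metadata buffer must be large enough to hold the struct.
    bool readMeta(const uint8_t *base, size_t size, NativeMeta *out) {
        if (size < sizeof(NativeMeta)) {
            return false;
        }
        std::memcpy(out, base, sizeof(NativeMeta));
        return true;
    }
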
@@ -5144,30 +6014,46 @@
             sp<AMessage> reply =
                 new AMessage(kWhatOutputBufferDrained, mCodec);
 
-            if (!mCodec->mSentFormat && rangeLength > 0) {
-                mCodec->sendFormatChange(reply);
+            if (mCodec->mOutputFormat != mCodec->mLastOutputFormat && rangeLength > 0) {
+                // pretend that output format has changed on the first frame (we used to do this)
+                if (mCodec->mBaseOutputFormat == mCodec->mOutputFormat) {
+                    mCodec->onOutputFormatChanged(mCodec->mOutputFormat);
+                }
+                mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
+                mCodec->sendFormatChange();
+            } else if (rangeLength > 0 && mCodec->mNativeWindow != NULL) {
+                // If potentially rendering onto a surface, always save key format data (crop &
+                // data space) so that we can set it if and when the buffer is rendered.
+                mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
             }
+
             if (mCodec->usingMetadataOnEncoderOutput()) {
                 native_handle_t *handle = NULL;
-                VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)info->mData->data();
-                VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)info->mData->data();
-                if (info->mData->size() >= sizeof(grallocMeta)
-                        && grallocMeta.eType == kMetadataBufferTypeGrallocSource) {
-                    handle = (native_handle_t *)(uintptr_t)grallocMeta.pHandle;
-                } else if (info->mData->size() >= sizeof(nativeMeta)
-                        && nativeMeta.eType == kMetadataBufferTypeANWBuffer) {
+                VideoNativeHandleMetadata &nativeMeta =
+                    *(VideoNativeHandleMetadata *)info->mData->data();
+                if (info->mData->size() >= sizeof(nativeMeta)
+                        && nativeMeta.eType == kMetadataBufferTypeNativeHandleSource) {
 #ifdef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
-                    // ANativeWindowBuffer is only valid on 32-bit/mediaserver process
+                    // handle is only valid on 32-bit/mediaserver process
                     handle = NULL;
 #else
-                    handle = (native_handle_t *)nativeMeta.pBuffer->handle;
+                    handle = (native_handle_t *)nativeMeta.pHandle;
 #endif
                 }
                 info->mData->meta()->setPointer("handle", handle);
                 info->mData->meta()->setInt32("rangeOffset", rangeOffset);
                 info->mData->meta()->setInt32("rangeLength", rangeLength);
-            } else {
+            } else if (info->mData == info->mCodecData) {
                 info->mData->setRange(rangeOffset, rangeLength);
+            } else {
+                info->mCodecData->setRange(rangeOffset, rangeLength);
+                // in this case we know that mConverter is not null
+                status_t err = mCodec->mConverter[kPortIndexOutput]->convert(
+                        info->mCodecData, info->mData);
+                if (err != OK) {
+                    mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+                    return true;
+                }
             }
 #if 0
             if (mCodec->mNativeWindow == NULL) {
@@ -5239,11 +6125,22 @@
     }
 
     android_native_rect_t crop;
-    if (msg->findRect("crop", &crop.left, &crop.top, &crop.right, &crop.bottom)) {
+    if (msg->findRect("crop", &crop.left, &crop.top, &crop.right, &crop.bottom)
+            && memcmp(&crop, &mCodec->mLastNativeWindowCrop, sizeof(crop)) != 0) {
+        mCodec->mLastNativeWindowCrop = crop;
         status_t err = native_window_set_crop(mCodec->mNativeWindow.get(), &crop);
         ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
     }
 
+    int32_t dataSpace;
+    if (msg->findInt32("dataspace", &dataSpace)
+            && dataSpace != mCodec->mLastNativeWindowDataSpace) {
+        status_t err = native_window_set_buffers_data_space(
+                mCodec->mNativeWindow.get(), (android_dataspace)dataSpace);
+        mCodec->mLastNativeWindowDataSpace = dataSpace;
+        ALOGW_IF(err != NO_ERROR, "failed to set dataspace: %d", err);
+    }
+
     int32_t render;
     if (mCodec->mNativeWindow != NULL
             && msg->findInt32("render", &render) && render != 0
@@ -5363,10 +6260,11 @@
     ALOGV("Now uninitialized");
 
     if (mDeathNotifier != NULL) {
-        IInterface::asBinder(mCodec->mOMX)->unlinkToDeath(mDeathNotifier);
+        mCodec->mNodeBinder->unlinkToDeath(mDeathNotifier);
         mDeathNotifier.clear();
     }
 
+    mCodec->mUsingNativeWindow = false;
     mCodec->mNativeWindow.clear();
     mCodec->mNativeWindowUsageBits = 0;
     mCodec->mNode = 0;
@@ -5375,6 +6273,8 @@
     mCodec->mFlags = 0;
     mCodec->mInputMetadataType = kMetadataBufferTypeInvalid;
     mCodec->mOutputMetadataType = kMetadataBufferTypeInvalid;
+    mCodec->mConverter[0].clear();
+    mCodec->mConverter[1].clear();
     mCodec->mComponentName.clear();
 }
 
@@ -5460,14 +6360,7 @@
 
     sp<AMessage> notify = new AMessage(kWhatOMXDied, mCodec);
 
-    mDeathNotifier = new DeathNotifier(notify);
-    if (IInterface::asBinder(omx)->linkToDeath(mDeathNotifier) != OK) {
-        // This was a local binder, if it dies so do we, we won't care
-        // about any notifications in the afterlife.
-        mDeathNotifier.clear();
-    }
-
-    Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs;
+    Vector<AString> matchingCodecs;
 
     AString mime;
 
@@ -5475,13 +6368,9 @@
     uint32_t quirks = 0;
     int32_t encoder = false;
     if (msg->findString("componentName", &componentName)) {
-        ssize_t index = matchingCodecs.add();
-        OMXCodec::CodecNameAndQuirks *entry = &matchingCodecs.editItemAt(index);
-        entry->mName = String8(componentName.c_str());
-
-        if (!OMXCodec::findCodecQuirks(
-                    componentName.c_str(), &entry->mQuirks)) {
-            entry->mQuirks = 0;
+        sp<IMediaCodecList> list = MediaCodecList::getInstance();
+        if (list != NULL && list->findCodecByName(componentName.c_str()) >= 0) {
+            matchingCodecs.add(componentName);
         }
     } else {
         CHECK(msg->findString("mime", &mime));
@@ -5490,11 +6379,10 @@
             encoder = false;
         }
 
-        OMXCodec::findMatchingCodecs(
+        MediaCodecList::findMatchingCodecs(
                 mime.c_str(),
                 encoder, // createEncoder
-                NULL,  // matchComponentName
-                0,     // flags
+                0,       // flags
                 &matchingCodecs);
     }
 
@@ -5504,13 +6392,13 @@
     status_t err = NAME_NOT_FOUND;
     for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
             ++matchIndex) {
-        componentName = matchingCodecs.itemAt(matchIndex).mName.string();
-        quirks = matchingCodecs.itemAt(matchIndex).mQuirks;
+        componentName = matchingCodecs[matchIndex];
+        quirks = MediaCodecList::getQuirksFor(componentName.c_str());
 
         pid_t tid = gettid();
         int prevPriority = androidGetThreadPriority(tid);
         androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
-        err = omx->allocateNode(componentName.c_str(), observer, &node);
+        err = omx->allocateNode(componentName.c_str(), observer, &mCodec->mNodeBinder, &node);
         androidSetThreadPriority(tid, prevPriority);
 
         if (err == OK) {
@@ -5534,6 +6422,14 @@
         return false;
     }
 
+    mDeathNotifier = new DeathNotifier(notify);
+    if (mCodec->mNodeBinder == NULL ||
+            mCodec->mNodeBinder->linkToDeath(mDeathNotifier) != OK) {
+        // This was a local binder; if it dies, so do we, and we won't care
+        // about any notifications in the afterlife.
+        mDeathNotifier.clear();
+    }
+
     notify = new AMessage(kWhatOMXMessageList, mCodec);
     observer->setNotificationMessage(notify);
 
@@ -5803,6 +6699,17 @@
                 "using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
     }
 
+    sp<ABuffer> colorAspectsBuffer;
+    if (mCodec->mInputFormat->findBuffer("android._color-aspects", &colorAspectsBuffer)) {
+        err = mCodec->mOMX->setInternalOption(
+                mCodec->mNode, kPortIndexInput, IOMX::INTERNAL_OPTION_COLOR_ASPECTS,
+                colorAspectsBuffer->base(), colorAspectsBuffer->capacity());
+        if (err != OK) {
+            ALOGE("[%s] Unable to configure color aspects (err %d)",
+                  mCodec->mComponentName.c_str(), err);
+            return err;
+        }
+    }
     return OK;
 }
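
The input format may carry an opaque "android._color-aspects" buffer that is handed to the component verbatim through setInternalOption; framework and component only need to agree on the blob's layout. A generic sketch of that pass-a-struct-as-a-blob idiom (the Option struct here is hypothetical, not the real ColorAspects):

    #include <cstdint>
    #include <vector>

    // Hypothetical option struct; the real code ships a ColorAspects struct
    // the same way, as an opaque byte blob the component casts back.
    struct Option {
        uint32_t rangeHint;
        uint32_t primariesHint;
    };

    std::vector<uint8_t> toBlob(const Option &opt) {
        const uint8_t *p = reinterpret_cast<const uint8_t *>(&opt);
        return std::vector<uint8_t>(p, p + sizeof(opt));
    }
    // The receiver checks blob.size() == sizeof(Option) before casting back,
    // mirroring the base()/capacity() pair passed to setInternalOption above.
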
 
@@ -5813,9 +6720,23 @@
     sp<AMessage> notify = mCodec->mNotify->dup();
     notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
 
+    android_dataspace dataSpace;
+    status_t err =
+        mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
+    notify->setMessage("input-format", mCodec->mInputFormat);
+    notify->setMessage("output-format", mCodec->mOutputFormat);
+
     sp<IGraphicBufferProducer> bufferProducer;
-    status_t err = mCodec->mOMX->createInputSurface(
-            mCodec->mNode, kPortIndexInput, &bufferProducer, &mCodec->mInputMetadataType);
+    if (err == OK) {
+        mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
+        err = mCodec->mOMX->createInputSurface(
+                mCodec->mNode, kPortIndexInput, dataSpace, &bufferProducer,
+                &mCodec->mInputMetadataType);
+        // framework uses ANW buffers internally instead of gralloc handles
+        if (mCodec->mInputMetadataType == kMetadataBufferTypeGrallocSource) {
+            mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
+        }
+    }
 
     if (err == OK) {
         err = setupInputSurface();
@@ -5846,11 +6767,25 @@
     CHECK(msg->findObject("input-surface", &obj));
     sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
 
-    status_t err = mCodec->mOMX->setInputSurface(
-            mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
-            &mCodec->mInputMetadataType);
+    android_dataspace dataSpace;
+    status_t err =
+        mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
+    notify->setMessage("input-format", mCodec->mInputFormat);
+    notify->setMessage("output-format", mCodec->mOutputFormat);
 
     if (err == OK) {
+        mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
+        err = mCodec->mOMX->setInputSurface(
+                mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
+                &mCodec->mInputMetadataType);
+        // framework uses ANW buffers internally instead of gralloc handles
+        if (mCodec->mInputMetadataType == kMetadataBufferTypeGrallocSource) {
+            mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
+        }
+    }
+
+    if (err == OK) {
+        surface->getBufferConsumer()->setDefaultBufferDataSpace(dataSpace);
         err = setupInputSurface();
     }
 
@@ -5893,6 +6828,15 @@
 
         mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
 
+        mCodec->mOMX->sendCommand(
+                mCodec->mNode, OMX_CommandStateSet, OMX_StateLoaded);
+        if (mCodec->allYourBuffersAreBelongToUs(kPortIndexInput)) {
+            mCodec->freeBuffersOnPort(kPortIndexInput);
+        }
+        if (mCodec->allYourBuffersAreBelongToUs(kPortIndexOutput)) {
+            mCodec->freeBuffersOnPort(kPortIndexOutput);
+        }
+
         mCodec->changeState(mCodec->mLoadedState);
     }
 }
@@ -6346,6 +7290,17 @@
         }
     }
 
+    int32_t intraRefreshPeriod = 0;
+    if (params->findInt32("intra-refresh-period", &intraRefreshPeriod)
+            && intraRefreshPeriod > 0) {
+        status_t err = setIntraRefreshPeriod(intraRefreshPeriod, false);
+        if (err != OK) {
+            ALOGI("[%s] failed setIntraRefreshPeriod. Failure is fine since this key is optional",
+                    mComponentName.c_str());
+            err = OK;
+        }
+    }
+
     return OK;
 }
 
@@ -6372,6 +7327,8 @@
         {
             CHECK_EQ(data1, (OMX_U32)kPortIndexOutput);
 
+            mCodec->onOutputFormatChanged();
+
             if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
                 mCodec->mMetadataBuffersToSubmit = 0;
                 CHECK_EQ(mCodec->mOMX->sendCommand(
@@ -6382,14 +7339,8 @@
                 mCodec->freeOutputBuffersNotOwnedByComponent();
 
                 mCodec->changeState(mCodec->mOutputPortSettingsChangedState);
-            } else if (data2 == OMX_IndexConfigCommonOutputCrop) {
-                mCodec->mSentFormat = false;
-
-                if (mCodec->mTunneled) {
-                    sp<AMessage> dummy = new AMessage(kWhatOutputBufferDrained, mCodec);
-                    mCodec->sendFormatChange(dummy);
-                }
-            } else {
+            } else if (data2 != OMX_IndexConfigCommonOutputCrop
+                    && data2 != OMX_IndexConfigAndroidIntraRefresh) {
                 ALOGV("[%s] OMX_EventPortSettingsChanged 0x%08x",
                      mCodec->mComponentName.c_str(), data2);
             }
@@ -6516,13 +7467,6 @@
                     return false;
                 }
 
-                mCodec->mSentFormat = false;
-
-                if (mCodec->mTunneled) {
-                    sp<AMessage> dummy = new AMessage(kWhatOutputBufferDrained, mCodec);
-                    mCodec->sendFormatChange(dummy);
-                }
-
                 ALOGV("[%s] Output port now reenabled.", mCodec->mComponentName.c_str());
 
                 if (mCodec->mExecutingState->active()) {
@@ -6581,7 +7525,7 @@
     ALOGV("[%s] Now Executing->Idle", mCodec->mComponentName.c_str());
 
     mComponentNowIdle = false;
-    mCodec->mSentFormat = false;
+    mCodec->mLastOutputFormat.clear();
 }
 
 bool ACodec::ExecutingToIdleState::onOMXEvent(
@@ -6865,4 +7809,230 @@
     }
 }
 
+status_t ACodec::queryCapabilities(
+        const AString &name, const AString &mime, bool isEncoder,
+        sp<MediaCodecInfo::Capabilities> *caps) {
+    (*caps).clear();
+    const char *role = getComponentRole(isEncoder, mime.c_str());
+    if (role == NULL) {
+        return BAD_VALUE;
+    }
+
+    OMXClient client;
+    status_t err = client.connect();
+    if (err != OK) {
+        return err;
+    }
+
+    sp<IOMX> omx = client.interface();
+    sp<CodecObserver> observer = new CodecObserver;
+    IOMX::node_id node = 0;
+
+    err = omx->allocateNode(name.c_str(), observer, NULL, &node);
+    if (err != OK) {
+        client.disconnect();
+        return err;
+    }
+
+    err = setComponentRole(omx, node, role);
+    if (err != OK) {
+        omx->freeNode(node);
+        client.disconnect();
+        return err;
+    }
+
+    sp<MediaCodecInfo::CapabilitiesBuilder> builder = new MediaCodecInfo::CapabilitiesBuilder();
+    bool isVideo = mime.startsWithIgnoreCase("video/");
+
+    if (isVideo) {
+        OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
+        InitOMXParams(&param);
+        param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
+
+        for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
+            param.nProfileIndex = index;
+            status_t err = omx->getParameter(
+                    node, OMX_IndexParamVideoProfileLevelQuerySupported,
+                    &param, sizeof(param));
+            if (err != OK) {
+                break;
+            }
+            builder->addProfileLevel(param.eProfile, param.eLevel);
+
+            if (index == kMaxIndicesToCheck) {
+                ALOGW("[%s] stopping checking profiles after %u: %x/%x",
+                        name.c_str(), index,
+                        param.eProfile, param.eLevel);
+            }
+        }
+
+        // Color format query
+        // return colors in the order reported by the OMX component
+        // prefix "flexible" standard ones with the flexible equivalent
+        OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
+        InitOMXParams(&portFormat);
+        portFormat.nPortIndex = isEncoder ? kPortIndexInput : kPortIndexOutput;
+        Vector<uint32_t> supportedColors; // shadow copy to check for duplicates
+        for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
+            portFormat.nIndex = index;
+            status_t err = omx->getParameter(
+                    node, OMX_IndexParamVideoPortFormat,
+                    &portFormat, sizeof(portFormat));
+            if (err != OK) {
+                break;
+            }
+
+            OMX_U32 flexibleEquivalent;
+            if (isFlexibleColorFormat(
+                    omx, node, portFormat.eColorFormat, false /* usingNativeWindow */,
+                    &flexibleEquivalent)) {
+                bool marked = false;
+                for (size_t i = 0; i < supportedColors.size(); ++i) {
+                    if (supportedColors[i] == flexibleEquivalent) {
+                        marked = true;
+                        break;
+                    }
+                }
+                if (!marked) {
+                    supportedColors.push(flexibleEquivalent);
+                    builder->addColorFormat(flexibleEquivalent);
+                }
+            }
+            supportedColors.push(portFormat.eColorFormat);
+            builder->addColorFormat(portFormat.eColorFormat);
+
+            if (index == kMaxIndicesToCheck) {
+                ALOGW("[%s] stopping checking formats after %u: %s(%x)",
+                        name.c_str(), index,
+                        asString(portFormat.eColorFormat), portFormat.eColorFormat);
+            }
+        }
+    } else if (mime.equalsIgnoreCase(MEDIA_MIMETYPE_AUDIO_AAC)) {
+        // More audio codecs if they have profiles.
+        OMX_AUDIO_PARAM_ANDROID_PROFILETYPE param;
+        InitOMXParams(&param);
+        param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
+        for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
+            param.nProfileIndex = index;
+            status_t err = omx->getParameter(
+                    node, (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported,
+                    &param, sizeof(param));
+            if (err != OK) {
+                break;
+            }
+            // For audio, level is ignored.
+            builder->addProfileLevel(param.eProfile, 0 /* level */);
+
+            if (index == kMaxIndicesToCheck) {
+                ALOGW("[%s] stopping checking profiles after %u: %x",
+                        name.c_str(), index,
+                        param.eProfile);
+            }
+        }
+
+        // NOTE: Without Android extensions, OMX does not provide a way to query
+        // AAC profile support
+        if (param.nProfileIndex == 0) {
+            ALOGW("component %s doesn't support profile query.", name.c_str());
+        }
+    }
+
+    if (isVideo && !isEncoder) {
+        native_handle_t *sidebandHandle = NULL;
+        if (omx->configureVideoTunnelMode(
+                node, kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
+            // tunneled playback includes adaptive playback
+            builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback
+                    | MediaCodecInfo::Capabilities::kFlagSupportsTunneledPlayback);
+        } else if (omx->storeMetaDataInBuffers(
+                node, kPortIndexOutput, OMX_TRUE) == OK ||
+            omx->prepareForAdaptivePlayback(
+                node, kPortIndexOutput, OMX_TRUE,
+                1280 /* width */, 720 /* height */) == OK) {
+            builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback);
+        }
+    }
+
+    if (isVideo && isEncoder) {
+        OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
+        InitOMXParams(&params);
+        params.nPortIndex = kPortIndexOutput;
+        // TODO: should we verify if fallback is supported?
+        if (omx->getConfig(
+                node, (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
+                &params, sizeof(params)) == OK) {
+            builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsIntraRefresh);
+        }
+    }
+
+    *caps = builder;
+    omx->freeNode(node);
+    client.disconnect();
+    return OK;
+}
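
queryCapabilities allocates a throwaway OMX node purely to enumerate what the component supports, then frees it. A usage sketch, assuming a caller that already links against libstagefright and includes the ACodec/MediaCodecInfo headers (error handling trimmed):

    // Query what a named decoder supports; the component name is an example.
    sp<MediaCodecInfo::Capabilities> caps;
    status_t err = ACodec::queryCapabilities(
            AString("OMX.google.h264.decoder"),
            AString("video/avc"),
            false /* isEncoder */,
            &caps);
    if (err == OK && caps != NULL) {
        // caps now holds the profile/level pairs, color formats, and flags
        // (adaptive, tunneled, intra-refresh) gathered by the loops above.
    }
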
+
+// These are supposed to be equivalent to the logic in
+// "audio_channel_out_mask_from_count".
+//static
+status_t ACodec::getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]) {
+    switch (numChannels) {
+        case 1:
+            map[0] = OMX_AUDIO_ChannelCF;
+            break;
+        case 2:
+            map[0] = OMX_AUDIO_ChannelLF;
+            map[1] = OMX_AUDIO_ChannelRF;
+            break;
+        case 3:
+            map[0] = OMX_AUDIO_ChannelLF;
+            map[1] = OMX_AUDIO_ChannelRF;
+            map[2] = OMX_AUDIO_ChannelCF;
+            break;
+        case 4:
+            map[0] = OMX_AUDIO_ChannelLF;
+            map[1] = OMX_AUDIO_ChannelRF;
+            map[2] = OMX_AUDIO_ChannelLR;
+            map[3] = OMX_AUDIO_ChannelRR;
+            break;
+        case 5:
+            map[0] = OMX_AUDIO_ChannelLF;
+            map[1] = OMX_AUDIO_ChannelRF;
+            map[2] = OMX_AUDIO_ChannelCF;
+            map[3] = OMX_AUDIO_ChannelLR;
+            map[4] = OMX_AUDIO_ChannelRR;
+            break;
+        case 6:
+            map[0] = OMX_AUDIO_ChannelLF;
+            map[1] = OMX_AUDIO_ChannelRF;
+            map[2] = OMX_AUDIO_ChannelCF;
+            map[3] = OMX_AUDIO_ChannelLFE;
+            map[4] = OMX_AUDIO_ChannelLR;
+            map[5] = OMX_AUDIO_ChannelRR;
+            break;
+        case 7:
+            map[0] = OMX_AUDIO_ChannelLF;
+            map[1] = OMX_AUDIO_ChannelRF;
+            map[2] = OMX_AUDIO_ChannelCF;
+            map[3] = OMX_AUDIO_ChannelLFE;
+            map[4] = OMX_AUDIO_ChannelLR;
+            map[5] = OMX_AUDIO_ChannelRR;
+            map[6] = OMX_AUDIO_ChannelCS;
+            break;
+        case 8:
+            map[0] = OMX_AUDIO_ChannelLF;
+            map[1] = OMX_AUDIO_ChannelRF;
+            map[2] = OMX_AUDIO_ChannelCF;
+            map[3] = OMX_AUDIO_ChannelLFE;
+            map[4] = OMX_AUDIO_ChannelLR;
+            map[5] = OMX_AUDIO_ChannelRR;
+            map[6] = OMX_AUDIO_ChannelLS;
+            map[7] = OMX_AUDIO_ChannelRS;
+            break;
+        default:
+            return -EINVAL;
+    }
+
+    return OK;
+}
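
The table mirrors audio_channel_out_mask_from_count: front pair first, center and LFE next, then the back pair, with side channels appearing only in the 8-channel layout. A usage sketch for 5.1 audio:

    // Fetch the OMX channel layout for 6-channel (5.1) audio.
    OMX_AUDIO_CHANNELTYPE map[8];  // 8 = largest count handled above
    if (ACodec::getOMXChannelMapping(6 /* numChannels */, map) == OK) {
        // map is now { LF, RF, CF, LFE, LR, RR }, matching case 6 above.
    }
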
+
 }  // namespace android
diff --git a/media/libstagefright/AMRExtractor.cpp b/media/libstagefright/AMRExtractor.cpp
index a6fb3d8..0e98db8 100644
--- a/media/libstagefright/AMRExtractor.cpp
+++ b/media/libstagefright/AMRExtractor.cpp
@@ -100,7 +100,10 @@
 static status_t getFrameSizeByOffset(const sp<DataSource> &source,
         off64_t offset, bool isWide, size_t *frameSize) {
     uint8_t header;
-    if (source->readAt(offset, &header, 1) < 1) {
+    ssize_t count = source->readAt(offset, &header, 1);
+    if (count == 0) {
+        return ERROR_END_OF_STREAM;
+    } else if (count < 0) {
         return ERROR_IO;
     }
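
The old `< 1` test folded a clean end of stream into ERROR_IO; the fix separates readAt's three outcomes. In sketch form, reusing the surrounding error constants:

    // readAt convention used above: n < 0 is an error code, n == 0 means
    // the source ended cleanly, n > 0 is the number of bytes actually read.
    status_t classifyReadAt(ssize_t n) {
        if (n < 0)  return ERROR_IO;
        if (n == 0) return ERROR_END_OF_STREAM;
        return OK;
    }
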
 
@@ -140,7 +143,10 @@
 
     if (mDataSource->getSize(&streamSize) == OK) {
          while (offset < streamSize) {
-            if (getFrameSizeByOffset(source, offset, mIsWide, &frameSize) != OK) {
+             status_t status = getFrameSizeByOffset(source, offset, mIsWide, &frameSize);
+             if (status == ERROR_END_OF_STREAM) {
+                 break;
+             } else if (status != OK) {
                 return;
             }
 
@@ -180,7 +186,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-sp<MediaSource> AMRExtractor::getTrack(size_t index) {
+sp<IMediaSource> AMRExtractor::getTrack(size_t index) {
     if (mInitCheck != OK || index != 0) {
         return NULL;
     }
@@ -309,7 +315,13 @@
         buffer->release();
         buffer = NULL;
 
-        return ERROR_IO;
+        if (n < 0) {
+            return ERROR_IO;
+        } else {
+            // only a partial frame is available; treat it as EOS.
+            mOffset += n;
+            return ERROR_END_OF_STREAM;
+        }
     }
 
     buffer->set_range(0, frameSize);
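
getFrameSizeByOffset derives the frame size from the frame-type (FT) bits of the one-byte AMR header, per RFC 4867. For reference, a sketch of the AMR-NB lookup; sizes include the header byte, and AMR-WB uses a different table:

    #include <cstddef>
    #include <cstdint>

    // AMR-NB frame sizes in bytes indexed by FT (header bits 3..6), header
    // byte included: modes 0-7 plus SID at index 8 (RFC 4867). Reserved and
    // NO_DATA frame types are omitted for brevity.
    static const size_t kFrameSizeNB[] = { 13, 14, 16, 18, 20, 21, 27, 32, 6 };

    bool amrNbFrameSize(uint8_t header, size_t *frameSize) {
        unsigned ft = (header >> 3) & 0x0f;
        if (ft >= sizeof(kFrameSizeNB) / sizeof(kFrameSizeNB[0])) {
            return false;  // not a speech or SID frame
        }
        *frameSize = kFrameSizeNB[ft];
        return true;
    }
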
diff --git a/media/libstagefright/AMRWriter.cpp b/media/libstagefright/AMRWriter.cpp
index f53d7f0..961b57f 100644
--- a/media/libstagefright/AMRWriter.cpp
+++ b/media/libstagefright/AMRWriter.cpp
@@ -54,7 +54,7 @@
     return mInitCheck;
 }
 
-status_t AMRWriter::addSource(const sp<MediaSource> &source) {
+status_t AMRWriter::addSource(const sp<IMediaSource> &source) {
     if (mInitCheck != OK) {
         return mInitCheck;
     }
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 2529aa7..2445842 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -1,7 +1,6 @@
 LOCAL_PATH:= $(call my-dir)
 include $(CLEAR_VARS)
 
-include frameworks/av/media/libstagefright/codecs/common/Config.mk
 
 LOCAL_SRC_FILES:=                         \
         ACodec.cpp                        \
@@ -11,12 +10,11 @@
         AMRWriter.cpp                     \
         AudioPlayer.cpp                   \
         AudioSource.cpp                   \
-        AwesomePlayer.cpp                 \
         CallbackDataSource.cpp            \
         CameraSource.cpp                  \
         CameraSourceTimeLapse.cpp         \
-        ClockEstimator.cpp                \
         CodecBase.cpp                     \
+        DataConverter.cpp                 \
         DataSource.cpp                    \
         DataURISource.cpp                 \
         DRMExtractor.cpp                  \
@@ -25,14 +23,13 @@
         FLACExtractor.cpp                 \
         FrameRenderTracker.cpp            \
         HTTPBase.cpp                      \
+        HevcUtils.cpp                     \
         JPEGSource.cpp                    \
         MP3Extractor.cpp                  \
         MPEG2TSWriter.cpp                 \
         MPEG4Extractor.cpp                \
         MPEG4Writer.cpp                   \
         MediaAdapter.cpp                  \
-        MediaBuffer.cpp                   \
-        MediaBufferGroup.cpp              \
         MediaClock.cpp                    \
         MediaCodec.cpp                    \
         MediaCodecList.cpp                \
@@ -45,23 +42,20 @@
         http/MediaHTTP.cpp                \
         MediaMuxer.cpp                    \
         MediaSource.cpp                   \
-        MetaData.cpp                      \
         NuCachedSource2.cpp               \
         NuMediaExtractor.cpp              \
         OMXClient.cpp                     \
-        OMXCodec.cpp                      \
         OggExtractor.cpp                  \
         ProcessInfo.cpp                   \
         SampleIterator.cpp                \
         SampleTable.cpp                   \
+        SimpleDecodingSource.cpp          \
         SkipCutBuffer.cpp                 \
         StagefrightMediaScanner.cpp       \
         StagefrightMetadataRetriever.cpp  \
         SurfaceMediaSource.cpp            \
         SurfaceUtils.cpp                  \
         ThrottledSource.cpp               \
-        TimeSource.cpp                    \
-        TimedEventQueue.cpp               \
         Utils.cpp                         \
         VBRISeeker.cpp                    \
         VideoFrameScheduler.cpp           \
@@ -79,8 +73,10 @@
         $(TOP)/external/tremolo \
         $(TOP)/external/libvpx/libwebm \
         $(TOP)/system/netd/include \
+        $(call include-path-for, audio-utils)
 
 LOCAL_SHARED_LIBRARIES := \
+        libaudioutils \
         libbinder \
         libcamera_client \
         libcutils \
@@ -135,6 +131,7 @@
 endif
 
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_MODULE:= libstagefright
 
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index dd9d393..b3fb8d4 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -25,6 +25,7 @@
 #include <media/AudioTrack.h>
 #include <media/openmax/OMX_Audio.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALookup.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/MediaDefs.h>
@@ -33,14 +34,11 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
 
-#include "include/AwesomePlayer.h"
-
 namespace android {
 
 AudioPlayer::AudioPlayer(
         const sp<MediaPlayerBase::AudioSink> &audioSink,
-        uint32_t flags,
-        AwesomePlayer *observer)
+        uint32_t flags)
     : mInputBuffer(NULL),
       mSampleRate(0),
       mLatencyUs(0),
@@ -58,8 +56,6 @@
       mFirstBufferResult(OK),
       mFirstBuffer(NULL),
       mAudioSink(audioSink),
-      mObserver(observer),
-      mPinnedTimeUs(-1ll),
       mPlaying(false),
       mStartPosUs(0),
       mCreateFlags(flags) {
@@ -71,11 +67,19 @@
     }
 }
 
-void AudioPlayer::setSource(const sp<MediaSource> &source) {
+void AudioPlayer::setSource(const sp<IMediaSource> &source) {
     CHECK(mSource == NULL);
     mSource = source;
 }
 
+ALookup<audio_format_t, int32_t> sAudioFormatToPcmEncoding {
+    {
+        { AUDIO_FORMAT_PCM_16_BIT, kAudioEncodingPcm16bit },
+        { AUDIO_FORMAT_PCM_8_BIT,  kAudioEncodingPcm8bit  },
+        { AUDIO_FORMAT_PCM_FLOAT,  kAudioEncodingPcmFloat },
+    }
+};
+
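
ALookup is a small bidirectional table: because the two sides have different types (audio_format_t vs int32_t), the same map() call can resolve either direction, as the call in start() below suggests. A usage sketch against the table just defined:

    // Forward: audio_format_t -> PCM encoding constant.
    audio_format_t fmt = AUDIO_FORMAT_PCM_16_BIT;
    int32_t encoding = 0;
    if (sAudioFormatToPcmEncoding.map(fmt, &encoding)) {
        // encoding == kAudioEncodingPcm16bit
    }
    // Reverse: encoding -> audio_format_t; this is the form start() uses
    // below with the kKeyPcmEncoding metadata value.
    if (sAudioFormatToPcmEncoding.map(encoding, &fmt)) {
        // fmt == AUDIO_FORMAT_PCM_16_BIT again
    }
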
 status_t AudioPlayer::start(bool sourceAlreadyStarted) {
     CHECK(!mStarted);
     CHECK(mSource != NULL);
@@ -134,6 +138,10 @@
     }
 
     audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
+    int32_t pcmEncoding;
+    if (format->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
+        sAudioFormatToPcmEncoding.map(pcmEncoding, &audioFormat);
+    }
 
     if (useOffload()) {
         if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
@@ -256,7 +264,6 @@
 
     mStarted = true;
     mPlaying = true;
-    mPinnedTimeUs = -1ll;
 
     return OK;
 }
@@ -279,8 +286,6 @@
         } else {
             mAudioTrack->pause();
         }
-
-        mPinnedTimeUs = ALooper::GetNowUs();
     }
 
     mPlaying = false;
@@ -358,7 +363,7 @@
     // When offloading, the OMX component is not used so this hack
     // is not needed
     if (!useOffload()) {
-        wp<MediaSource> tmp = mSource;
+        wp<IMediaSource> tmp = mSource;
         mSource.clear();
         while (tmp.promote() != NULL) {
             usleep(1000);
@@ -386,11 +391,6 @@
     static_cast<AudioPlayer *>(user)->AudioCallback(event, info);
 }
 
-bool AudioPlayer::isSeeking() {
-    Mutex::Autolock autoLock(mLock);
-    return mSeeking;
-}
-
 bool AudioPlayer::reachedEOS(status_t *finalStatus) {
     *finalStatus = OK;
 
@@ -399,15 +399,6 @@
     return mReachedEOS;
 }
 
-void AudioPlayer::notifyAudioEOS() {
-    ALOGV("AudioPlayer@0x%p notifyAudioEOS", this);
-
-    if (mObserver != NULL) {
-        mObserver->postAudioEOS(0);
-        ALOGV("Notified observer of EOS!");
-    }
-}
-
 status_t AudioPlayer::setPlaybackRate(const AudioPlaybackRate &rate) {
     if (mAudioSink.get() != NULL) {
         return mAudioSink->setPlaybackRate(rate);
@@ -443,12 +434,10 @@
     case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
         ALOGV("AudioSinkCallback: stream end");
         me->mReachedEOS = true;
-        me->notifyAudioEOS();
         break;
 
     case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
         ALOGV("AudioSinkCallback: Tear down event");
-        me->mObserver->postAudioTearDown();
         break;
     }
 
@@ -467,31 +456,10 @@
 
     case AudioTrack::EVENT_STREAM_END:
         mReachedEOS = true;
-        notifyAudioEOS();
         break;
     }
 }
 
-uint32_t AudioPlayer::getNumFramesPendingPlayout() const {
-    uint32_t numFramesPlayedOut;
-    status_t err;
-
-    if (mAudioSink != NULL) {
-        err = mAudioSink->getPosition(&numFramesPlayedOut);
-    } else {
-        err = mAudioTrack->getPosition(&numFramesPlayedOut);
-    }
-
-    if (err != OK || mNumFramesPlayed < numFramesPlayedOut) {
-        return 0;
-    }
-
-    // mNumFramesPlayed is the number of frames submitted
-    // to the audio sink for playback, but not all of them
-    // may have played out by now.
-    return mNumFramesPlayed - numFramesPlayedOut;
-}
-
 size_t AudioPlayer::fillBuffer(void *data, size_t size) {
     if (mNumFramesPlayed == 0) {
         ALOGV("AudioCallback");
@@ -501,10 +469,6 @@
         return 0;
     }
 
-    bool postSeekComplete = false;
-    bool postEOS = false;
-    int64_t postEOSDelayUs = 0;
-
     size_t size_done = 0;
     size_t size_remaining = size;
     while (size_remaining > 0) {
@@ -532,9 +496,6 @@
                 }
 
                 mSeeking = false;
-                if (mObserver) {
-                    postSeekComplete = true;
-                }
             }
         }
 
@@ -567,42 +528,6 @@
                             mAudioTrack->stop();
                         }
                     } else {
-                        if (mObserver) {
-                            // We don't want to post EOS right away but only
-                            // after all frames have actually been played out.
-
-                            // These are the number of frames submitted to the
-                            // AudioTrack that you haven't heard yet.
-                            uint32_t numFramesPendingPlayout =
-                                getNumFramesPendingPlayout();
-
-                            // These are the number of frames we're going to
-                            // submit to the AudioTrack by returning from this
-                            // callback.
-                            uint32_t numAdditionalFrames = size_done / mFrameSize;
-
-                            numFramesPendingPlayout += numAdditionalFrames;
-
-                            int64_t timeToCompletionUs =
-                                (1000000ll * numFramesPendingPlayout) / mSampleRate;
-
-                            ALOGV("total number of frames played: %" PRId64 " (%lld us)",
-                                    (mNumFramesPlayed + numAdditionalFrames),
-                                    1000000ll * (mNumFramesPlayed + numAdditionalFrames)
-                                        / mSampleRate);
-
-                            ALOGV("%d frames left to play, %" PRId64 " us (%.2f secs)",
-                                 numFramesPendingPlayout,
-                                 timeToCompletionUs, timeToCompletionUs / 1E6);
-
-                            postEOS = true;
-                            if (mAudioSink->needsTrailingPadding()) {
-                                postEOSDelayUs = timeToCompletionUs + mLatencyUs;
-                            } else {
-                                postEOSDelayUs = 0;
-                            }
-                        }
-
                         mReachedEOS = true;
                     }
                 }
@@ -626,12 +551,6 @@
             // might not be able to get the exact seek time requested.
             if (refreshSeekTime) {
                 if (useOffload()) {
-                    if (postSeekComplete) {
-                        ALOGV("fillBuffer is going to post SEEK_COMPLETE");
-                        mObserver->postAudioSeekComplete();
-                        postSeekComplete = false;
-                    }
-
                     mStartPosUs = mPositionTimeMediaUs;
                     ALOGV("adjust seek time to: %.2f", mStartPosUs/ 1E6);
                 }
@@ -690,58 +609,11 @@
         Mutex::Autolock autoLock(mLock);
         mNumFramesPlayed += size_done / mFrameSize;
         mNumFramesPlayedSysTimeUs = ALooper::GetNowUs();
-
-        if (mReachedEOS) {
-            mPinnedTimeUs = mNumFramesPlayedSysTimeUs;
-        } else {
-            mPinnedTimeUs = -1ll;
-        }
-    }
-
-    if (postEOS) {
-        mObserver->postAudioEOS(postEOSDelayUs);
-    }
-
-    if (postSeekComplete) {
-        mObserver->postAudioSeekComplete();
     }
 
     return size_done;
 }
 
-int64_t AudioPlayer::getRealTimeUs() {
-    Mutex::Autolock autoLock(mLock);
-    if (useOffload()) {
-        if (mSeeking) {
-            return mSeekTimeUs;
-        }
-        mPositionTimeRealUs = getOutputPlayPositionUs_l();
-        return mPositionTimeRealUs;
-    }
-
-    return getRealTimeUsLocked();
-}
-
-int64_t AudioPlayer::getRealTimeUsLocked() const {
-    CHECK(mStarted);
-    CHECK_NE(mSampleRate, 0);
-    int64_t result = -mLatencyUs + (mNumFramesPlayed * 1000000) / mSampleRate;
-
-    // Compensate for large audio buffers, updates of mNumFramesPlayed
-    // are less frequent, therefore to get a "smoother" notion of time we
-    // compensate using system time.
-    int64_t diffUs;
-    if (mPinnedTimeUs >= 0ll) {
-        diffUs = mPinnedTimeUs;
-    } else {
-        diffUs = ALooper::GetNowUs();
-    }
-
-    diffUs -= mNumFramesPlayedSysTimeUs;
-
-    return result + diffUs;
-}
-
 int64_t AudioPlayer::getOutputPlayPositionUs_l()
 {
     uint32_t playedSamples = 0;
@@ -770,54 +642,6 @@
     return renderedDuration;
 }
 
-int64_t AudioPlayer::getMediaTimeUs() {
-    Mutex::Autolock autoLock(mLock);
-
-    if (useOffload()) {
-        if (mSeeking) {
-            return mSeekTimeUs;
-        }
-        if (mReachedEOS) {
-            int64_t durationUs;
-            mSource->getFormat()->findInt64(kKeyDuration, &durationUs);
-            return durationUs;
-        }
-        mPositionTimeRealUs = getOutputPlayPositionUs_l();
-        ALOGV("getMediaTimeUs getOutputPlayPositionUs_l() mPositionTimeRealUs %" PRId64,
-              mPositionTimeRealUs);
-        return mPositionTimeRealUs;
-    }
-
-
-    if (mPositionTimeMediaUs < 0 || mPositionTimeRealUs < 0) {
-        // mSeekTimeUs is either seek time while seeking or 0 if playback did not start.
-        return mSeekTimeUs;
-    }
-
-    int64_t realTimeOffset = getRealTimeUsLocked() - mPositionTimeRealUs;
-    if (realTimeOffset < 0) {
-        realTimeOffset = 0;
-    }
-
-    return mPositionTimeMediaUs + realTimeOffset;
-}
-
-bool AudioPlayer::getMediaTimeMapping(
-        int64_t *realtime_us, int64_t *mediatime_us) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (useOffload()) {
-        mPositionTimeRealUs = getOutputPlayPositionUs_l();
-        *realtime_us = mPositionTimeRealUs;
-        *mediatime_us = mPositionTimeRealUs;
-    } else {
-        *realtime_us = mPositionTimeRealUs;
-        *mediatime_us = mPositionTimeMediaUs;
-    }
-
-    return mPositionTimeRealUs != -1 && mPositionTimeMediaUs != -1;
-}
-
 status_t AudioPlayer::seekTo(int64_t time_us) {
     Mutex::Autolock autoLock(mLock);
 
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 55f4361..790c6da 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -51,7 +51,8 @@
 
 AudioSource::AudioSource(
         audio_source_t inputSource, const String16 &opPackageName,
-        uint32_t sampleRate, uint32_t channelCount, uint32_t outSampleRate)
+        uint32_t sampleRate, uint32_t channelCount, uint32_t outSampleRate,
+        uid_t uid, pid_t pid)
     : mStarted(false),
       mSampleRate(sampleRate),
       mOutSampleRate(outSampleRate > 0 ? outSampleRate : sampleRate),
@@ -91,7 +92,12 @@
                     (size_t) (bufCount * frameCount),
                     AudioRecordCallbackFunction,
                     this,
-                    frameCount /*notificationFrames*/);
+                    frameCount /*notificationFrames*/,
+                    AUDIO_SESSION_ALLOCATE,
+                    AudioRecord::TRANSFER_DEFAULT,
+                    AUDIO_INPUT_FLAG_NONE,
+                    uid,
+                    pid);
         mInitCheck = mRecord->initCheck();
         if (mInitCheck != OK) {
             mRecord.clear();
@@ -188,6 +194,7 @@
     meta->setInt32(kKeySampleRate, mSampleRate);
     meta->setInt32(kKeyChannelCount, mRecord->channelCount());
     meta->setInt32(kKeyMaxInputSize, kMaxBufferSize);
+    meta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
 
     return meta;
 }
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
deleted file mode 100644
index 3cd0b0e..0000000
--- a/media/libstagefright/AwesomePlayer.cpp
+++ /dev/null
@@ -1,3047 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#undef DEBUG_HDCP
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "AwesomePlayer"
-#define ATRACE_TAG ATRACE_TAG_VIDEO
-
-#include <inttypes.h>
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-
-#include <dlfcn.h>
-
-#include "include/AwesomePlayer.h"
-#include "include/DRMExtractor.h"
-#include "include/SoftwareRenderer.h"
-#include "include/NuCachedSource2.h"
-#include "include/ThrottledSource.h"
-#include "include/MPEG2TSExtractor.h"
-#include "include/WVMExtractor.h"
-
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
-#include <media/IMediaPlayerService.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/timedtext/TimedTextDriver.h>
-#include <media/stagefright/AudioPlayer.h>
-#include <media/stagefright/ClockEstimator.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaHTTP.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXCodec.h>
-#include <media/stagefright/Utils.h>
-
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-
-#include <media/stagefright/foundation/AMessage.h>
-
-#include <cutils/properties.h>
-
-#define USE_SURFACE_ALLOC 1
-#define FRAME_DROP_FREQ 0
-
-namespace android {
-
-static int64_t kLowWaterMarkUs = 2000000ll;  // 2secs
-static int64_t kHighWaterMarkUs = 5000000ll;  // 5secs
-static const size_t kLowWaterMarkBytes = 40000;
-static const size_t kHighWaterMarkBytes = 200000;
-
-// maximum time in paused state when offloading audio decompression. When elapsed, the AudioPlayer
-// is destroyed to allow the audio DSP to power down.
-static int64_t kOffloadPauseMaxUs = 10000000ll;
-
-
-struct AwesomeEvent : public TimedEventQueue::Event {
-    AwesomeEvent(
-            AwesomePlayer *player,
-            void (AwesomePlayer::*method)())
-        : mPlayer(player),
-          mMethod(method) {
-    }
-
-protected:
-    virtual ~AwesomeEvent() {}
-
-    virtual void fire(TimedEventQueue * /* queue */, int64_t /* now_us */) {
-        (mPlayer->*mMethod)();
-    }
-
-private:
-    AwesomePlayer *mPlayer;
-    void (AwesomePlayer::*mMethod)();
-
-    AwesomeEvent(const AwesomeEvent &);
-    AwesomeEvent &operator=(const AwesomeEvent &);
-};
-
-struct AwesomeLocalRenderer : public AwesomeRenderer {
-    AwesomeLocalRenderer(
-            const sp<ANativeWindow> &nativeWindow, const sp<AMessage> &format)
-        : mFormat(format),
-          mTarget(new SoftwareRenderer(nativeWindow)) {
-    }
-
-    virtual void render(MediaBuffer *buffer) {
-        int64_t timeUs;
-        CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
-
-        render((const uint8_t *)buffer->data() + buffer->range_offset(),
-               buffer->range_length(), timeUs, timeUs * 1000);
-    }
-
-    void render(const void *data, size_t size, int64_t mediaTimeUs, nsecs_t renderTimeNs) {
-        (void)mTarget->render(data, size, mediaTimeUs, renderTimeNs, NULL, mFormat);
-    }
-
-protected:
-    virtual ~AwesomeLocalRenderer() {
-        delete mTarget;
-        mTarget = NULL;
-    }
-
-private:
-    sp<AMessage> mFormat;
-    SoftwareRenderer *mTarget;
-
-    AwesomeLocalRenderer(const AwesomeLocalRenderer &);
-    AwesomeLocalRenderer &operator=(const AwesomeLocalRenderer &);;
-};
-
-struct AwesomeNativeWindowRenderer : public AwesomeRenderer {
-    AwesomeNativeWindowRenderer(
-            const sp<ANativeWindow> &nativeWindow,
-            int32_t rotationDegrees)
-        : mNativeWindow(nativeWindow) {
-        applyRotation(rotationDegrees);
-    }
-
-    virtual void render(MediaBuffer *buffer) {
-        ATRACE_CALL();
-        int64_t timeUs;
-        CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
-        native_window_set_buffers_timestamp(mNativeWindow.get(), timeUs * 1000);
-        status_t err = mNativeWindow->queueBuffer(
-                mNativeWindow.get(), buffer->graphicBuffer().get(), -1);
-        if (err != 0) {
-            ALOGE("queueBuffer failed with error %s (%d)", strerror(-err),
-                    -err);
-            return;
-        }
-
-        sp<MetaData> metaData = buffer->meta_data();
-        metaData->setInt32(kKeyRendered, 1);
-    }
-
-protected:
-    virtual ~AwesomeNativeWindowRenderer() {}
-
-private:
-    sp<ANativeWindow> mNativeWindow;
-
-    void applyRotation(int32_t rotationDegrees) {
-        uint32_t transform;
-        switch (rotationDegrees) {
-            case 0: transform = 0; break;
-            case 90: transform = HAL_TRANSFORM_ROT_90; break;
-            case 180: transform = HAL_TRANSFORM_ROT_180; break;
-            case 270: transform = HAL_TRANSFORM_ROT_270; break;
-            default: transform = 0; break;
-        }
-
-        if (transform) {
-            CHECK_EQ(0, native_window_set_buffers_transform(
-                        mNativeWindow.get(), transform));
-        }
-    }
-
-    AwesomeNativeWindowRenderer(const AwesomeNativeWindowRenderer &);
-    AwesomeNativeWindowRenderer &operator=(
-            const AwesomeNativeWindowRenderer &);
-};
-
-// To collect the decoder usage
-void addBatteryData(uint32_t params) {
-    sp<IBinder> binder =
-        defaultServiceManager()->getService(String16("media.player"));
-    sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
-    CHECK(service.get() != NULL);
-
-    service->addBatteryData(params);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-AwesomePlayer::AwesomePlayer()
-    : mQueueStarted(false),
-      mUIDValid(false),
-      mTimeSource(NULL),
-      mVideoRenderingStarted(false),
-      mVideoRendererIsPreview(false),
-      mMediaRenderingStartGeneration(0),
-      mStartGeneration(0),
-      mAudioPlayer(NULL),
-      mDisplayWidth(0),
-      mDisplayHeight(0),
-      mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW),
-      mFlags(0),
-      mExtractorFlags(0),
-      mVideoBuffer(NULL),
-      mDecryptHandle(NULL),
-      mLastVideoTimeUs(-1),
-      mTextDriver(NULL),
-      mOffloadAudio(false),
-      mAudioTearDown(false) {
-    CHECK_EQ(mClient.connect(), (status_t)OK);
-
-    DataSource::RegisterDefaultSniffers();
-
-    mVideoEvent = new AwesomeEvent(this, &AwesomePlayer::onVideoEvent);
-    mVideoEventPending = false;
-    mStreamDoneEvent = new AwesomeEvent(this, &AwesomePlayer::onStreamDone);
-    mStreamDoneEventPending = false;
-    mBufferingEvent = new AwesomeEvent(this, &AwesomePlayer::onBufferingUpdate);
-    mBufferingEventPending = false;
-    mVideoLagEvent = new AwesomeEvent(this, &AwesomePlayer::onVideoLagUpdate);
-    mVideoLagEventPending = false;
-
-    mCheckAudioStatusEvent = new AwesomeEvent(
-            this, &AwesomePlayer::onCheckAudioStatus);
-
-    mAudioStatusEventPending = false;
-
-    mAudioTearDownEvent = new AwesomeEvent(this,
-                              &AwesomePlayer::onAudioTearDownEvent);
-    mAudioTearDownEventPending = false;
-
-    mClockEstimator = new WindowedLinearFitEstimator();
-
-    mPlaybackSettings = AUDIO_PLAYBACK_RATE_DEFAULT;
-
-    reset();
-}
-
-AwesomePlayer::~AwesomePlayer() {
-    if (mQueueStarted) {
-        mQueue.stop();
-    }
-
-    reset();
-
-    mClient.disconnect();
-}
-
-void AwesomePlayer::cancelPlayerEvents(bool keepNotifications) {
-    mQueue.cancelEvent(mVideoEvent->eventID());
-    mVideoEventPending = false;
-    mQueue.cancelEvent(mVideoLagEvent->eventID());
-    mVideoLagEventPending = false;
-
-    if (mOffloadAudio) {
-        mQueue.cancelEvent(mAudioTearDownEvent->eventID());
-        mAudioTearDownEventPending = false;
-    }
-
-    if (!keepNotifications) {
-        mQueue.cancelEvent(mStreamDoneEvent->eventID());
-        mStreamDoneEventPending = false;
-        mQueue.cancelEvent(mCheckAudioStatusEvent->eventID());
-        mAudioStatusEventPending = false;
-
-        mQueue.cancelEvent(mBufferingEvent->eventID());
-        mBufferingEventPending = false;
-        mAudioTearDown = false;
-    }
-}
-
-void AwesomePlayer::setListener(const wp<MediaPlayerBase> &listener) {
-    Mutex::Autolock autoLock(mLock);
-    mListener = listener;
-}
-
-void AwesomePlayer::setUID(uid_t uid) {
-    ALOGV("AwesomePlayer running on behalf of uid %d", uid);
-
-    mUID = uid;
-    mUIDValid = true;
-}
-
-status_t AwesomePlayer::setDataSource(
-        const sp<IMediaHTTPService> &httpService,
-        const char *uri,
-        const KeyedVector<String8, String8> *headers) {
-    Mutex::Autolock autoLock(mLock);
-    return setDataSource_l(httpService, uri, headers);
-}
-
-status_t AwesomePlayer::setDataSource_l(
-        const sp<IMediaHTTPService> &httpService,
-        const char *uri,
-        const KeyedVector<String8, String8> *headers) {
-    reset_l();
-
-    mHTTPService = httpService;
-    mUri = uri;
-
-    if (headers) {
-        mUriHeaders = *headers;
-
-        ssize_t index = mUriHeaders.indexOfKey(String8("x-hide-urls-from-log"));
-        if (index >= 0) {
-            // Browser is in "incognito" mode; suppress logging of URLs.
-
-            // This isn't something that should be passed to the server.
-            mUriHeaders.removeItemsAt(index);
-
-            modifyFlags(INCOGNITO, SET);
-        }
-    }
-
-    ALOGI("setDataSource_l(%s)", uriDebugString(mUri, mFlags & INCOGNITO).c_str());
-
-    // The actual work will be done during preparation in the call to
-    // ::finishSetDataSource_l to avoid blocking the calling thread in
-    // setDataSource for any significant time.
-
-    {
-        Mutex::Autolock autoLock(mStatsLock);
-        mStats.mFd = -1;
-        mStats.mURI = mUri;
-    }
-
-    return OK;
-}
-
-status_t AwesomePlayer::setDataSource(
-        int fd, int64_t offset, int64_t length) {
-    Mutex::Autolock autoLock(mLock);
-
-    reset_l();
-
-    sp<DataSource> dataSource = new FileSource(fd, offset, length);
-
-    status_t err = dataSource->initCheck();
-
-    if (err != OK) {
-        return err;
-    }
-
-    mFileSource = dataSource;
-
-    {
-        Mutex::Autolock autoLock(mStatsLock);
-        mStats.mFd = fd;
-        mStats.mURI = String8();
-    }
-
-    return setDataSource_l(dataSource);
-}
-
-status_t AwesomePlayer::setDataSource(const sp<IStreamSource> &source __unused) {
-    return INVALID_OPERATION;
-}
-
-status_t AwesomePlayer::setDataSource_l(
-        const sp<DataSource> &dataSource) {
-    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
-
-    if (extractor == NULL) {
-        return UNKNOWN_ERROR;
-    }
-
-    if (extractor->getDrmFlag()) {
-        checkDrmStatus(dataSource);
-    }
-
-    return setDataSource_l(extractor);
-}
-
-void AwesomePlayer::checkDrmStatus(const sp<DataSource>& dataSource) {
-    dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
-    if (mDecryptHandle != NULL) {
-        CHECK(mDrmManagerClient);
-        if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
-            notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
-        }
-    }
-}
-
-status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
-    // Attempt to approximate the overall stream bitrate by summing all
-    // tracks' individual bitrates. If not all of them advertise a bitrate,
-    // we have to fail.
-
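-    // totalBitRate ends up as the sum in bits/sec, or -1 if any track's
-    // bitrate is unknown.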
-    int64_t totalBitRate = 0;
-
-    mExtractor = extractor;
-    for (size_t i = 0; i < extractor->countTracks(); ++i) {
-        sp<MetaData> meta = extractor->getTrackMetaData(i);
-
-        int32_t bitrate;
-        if (!meta->findInt32(kKeyBitRate, &bitrate)) {
-            const char *mime;
-            CHECK(meta->findCString(kKeyMIMEType, &mime));
-            ALOGV("track of type '%s' does not publish bitrate", mime);
-
-            totalBitRate = -1;
-            break;
-        }
-
-        totalBitRate += bitrate;
-    }
-    sp<MetaData> fileMeta = mExtractor->getMetaData();
-    if (fileMeta != NULL) {
-        int64_t duration;
-        if (fileMeta->findInt64(kKeyDuration, &duration)) {
-            mDurationUs = duration;
-        }
-    }
-
-    mBitrate = totalBitRate;
-
-    ALOGV("mBitrate = %lld bits/sec", (long long)mBitrate);
-
-    {
-        Mutex::Autolock autoLock(mStatsLock);
-        mStats.mBitrate = mBitrate;
-        mStats.mTracks.clear();
-        mStats.mAudioTrackIndex = -1;
-        mStats.mVideoTrackIndex = -1;
-    }
-
-    bool haveAudio = false;
-    bool haveVideo = false;
-    for (size_t i = 0; i < extractor->countTracks(); ++i) {
-        sp<MetaData> meta = extractor->getTrackMetaData(i);
-
-        const char *_mime;
-        CHECK(meta->findCString(kKeyMIMEType, &_mime));
-
-        String8 mime = String8(_mime);
-
-        if (!haveVideo && !strncasecmp(mime.string(), "video/", 6)) {
-            setVideoSource(extractor->getTrack(i));
-            haveVideo = true;
-
-            // Set the presentation/display size
-            int32_t displayWidth, displayHeight;
-            bool success = meta->findInt32(kKeyDisplayWidth, &displayWidth);
-            if (success) {
-                success = meta->findInt32(kKeyDisplayHeight, &displayHeight);
-            }
-            if (success) {
-                mDisplayWidth = displayWidth;
-                mDisplayHeight = displayHeight;
-            }
-
-            {
-                Mutex::Autolock autoLock(mStatsLock);
-                mStats.mVideoTrackIndex = mStats.mTracks.size();
-                mStats.mTracks.push();
-                TrackStat *stat =
-                    &mStats.mTracks.editItemAt(mStats.mVideoTrackIndex);
-                stat->mMIME = mime.string();
-            }
-        } else if (!haveAudio && !strncasecmp(mime.string(), "audio/", 6)) {
-            setAudioSource(extractor->getTrack(i));
-            haveAudio = true;
-            mActiveAudioTrackIndex = i;
-
-            {
-                Mutex::Autolock autoLock(mStatsLock);
-                mStats.mAudioTrackIndex = mStats.mTracks.size();
-                mStats.mTracks.push();
-                TrackStat *stat =
-                    &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
-                stat->mMIME = mime.string();
-            }
-
-            if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_AUDIO_VORBIS)) {
-                // Only do this for vorbis audio; none of the other audio
-                // formats even support this ringtone-specific hack, and
-                // retrieving the metadata on some extractors may turn out
-                // to be very expensive.
-                sp<MetaData> fileMeta = extractor->getMetaData();
-                int32_t loop;
-                if (fileMeta != NULL
-                        && fileMeta->findInt32(kKeyAutoLoop, &loop) && loop != 0) {
-                    modifyFlags(AUTO_LOOPING, SET);
-                }
-            }
-        } else if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_TEXT_3GPP)) {
-            addTextSource_l(i, extractor->getTrack(i));
-        }
-    }
-
-    if (!haveAudio && !haveVideo) {
-        if (mWVMExtractor != NULL) {
-            return mWVMExtractor->getError();
-        } else {
-            return UNKNOWN_ERROR;
-        }
-    }
-
-    mExtractorFlags = extractor->flags();
-
-    return OK;
-}
-
-void AwesomePlayer::reset() {
-    Mutex::Autolock autoLock(mLock);
-    reset_l();
-}
-
-void AwesomePlayer::reset_l() {
-    mVideoRenderingStarted = false;
-    mActiveAudioTrackIndex = -1;
-    mDisplayWidth = 0;
-    mDisplayHeight = 0;
-
-    notifyListener_l(MEDIA_STOPPED);
-
-    if (mDecryptHandle != NULL) {
-            mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
-                    Playback::STOP, 0);
-            mDecryptHandle = NULL;
-            mDrmManagerClient = NULL;
-    }
-
-    if (mFlags & PLAYING) {
-        uint32_t params = IMediaPlayerService::kBatteryDataTrackDecoder;
-        if ((mAudioSource != NULL) && (mAudioSource != mAudioTrack)) {
-            params |= IMediaPlayerService::kBatteryDataTrackAudio;
-        }
-        if (mVideoSource != NULL) {
-            params |= IMediaPlayerService::kBatteryDataTrackVideo;
-        }
-        addBatteryData(params);
-    }
-
-    if (mFlags & PREPARING) {
-        modifyFlags(PREPARE_CANCELLED, SET);
-        if (mConnectingDataSource != NULL) {
-            ALOGI("interrupting the connection process");
-            mConnectingDataSource->disconnect();
-        }
-
-        if (mFlags & PREPARING_CONNECTED) {
-            // We are basically done preparing; we're just buffering
-            // enough data to start playback, so we can safely interrupt that.
-            finishAsyncPrepare_l();
-        }
-    }
-
-    while (mFlags & PREPARING) {
-        mPreparedCondition.wait(mLock);
-    }
-
-    cancelPlayerEvents();
-
-    mWVMExtractor.clear();
-    mCachedSource.clear();
-    mAudioTrack.clear();
-    mVideoTrack.clear();
-    mExtractor.clear();
-
-    // Shut down audio first, so that the response to the reset request
-    // appears to happen instantaneously as far as the user is concerned.
-    // If we did this later, audio would continue playing while we
-    // shut down the video-related resources, and the player would appear
-    // less responsive to a reset request.
-    if ((mAudioPlayer == NULL || !(mFlags & AUDIOPLAYER_STARTED))
-            && mAudioSource != NULL) {
-        // If we had an audio player, it would have effectively
-        // taken possession of the audio source and stopped it when
-        // _it_ is stopped. Otherwise this is still our responsibility.
-        mAudioSource->stop();
-    }
-    mAudioSource.clear();
-    mOmxSource.clear();
-
-    mTimeSource = NULL;
-
-    delete mAudioPlayer;
-    mAudioPlayer = NULL;
-
-    if (mTextDriver != NULL) {
-        delete mTextDriver;
-        mTextDriver = NULL;
-    }
-
-    mVideoRenderer.clear();
-
-    if (mVideoSource != NULL) {
-        shutdownVideoDecoder_l();
-    }
-
-    mDurationUs = -1;
-    modifyFlags(0, ASSIGN);
-    mExtractorFlags = 0;
-    mTimeSourceDeltaUs = 0;
-    mVideoTimeUs = 0;
-
-    mSeeking = NO_SEEK;
-    mSeekNotificationSent = true;
-    mSeekTimeUs = 0;
-
-    mHTTPService.clear();
-    mUri.setTo("");
-    mUriHeaders.clear();
-
-    mFileSource.clear();
-
-    mBitrate = -1;
-    mLastVideoTimeUs = -1;
-
-    {
-        Mutex::Autolock autoLock(mStatsLock);
-        mStats.mFd = -1;
-        mStats.mURI = String8();
-        mStats.mBitrate = -1;
-        mStats.mAudioTrackIndex = -1;
-        mStats.mVideoTrackIndex = -1;
-        mStats.mNumVideoFramesDecoded = 0;
-        mStats.mNumVideoFramesDropped = 0;
-        mStats.mVideoWidth = -1;
-        mStats.mVideoHeight = -1;
-        mStats.mFlags = 0;
-        mStats.mTracks.clear();
-    }
-
-    mWatchForAudioSeekComplete = false;
-    mWatchForAudioEOS = false;
-
-    mMediaRenderingStartGeneration = 0;
-    mStartGeneration = 0;
-}
-
-void AwesomePlayer::notifyListener_l(int msg, int ext1, int ext2) {
-    if ((mListener != NULL) && !mAudioTearDown) {
-        sp<MediaPlayerBase> listener = mListener.promote();
-
-        if (listener != NULL) {
-            listener->sendEvent(msg, ext1, ext2);
-        }
-    }
-}
-
-bool AwesomePlayer::getBitrate(int64_t *bitrate) {
-    off64_t size;
-    if (mDurationUs > 0 && mCachedSource != NULL
-            && mCachedSource->getSize(&size) == OK) {
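-        // size [bytes] * 8 [bits/byte] * 1,000,000 [us/s] / durationUs = bits/sec.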
-        *bitrate = size * 8000000ll / mDurationUs;  // in bits/sec
-        return true;
-    }
-
-    if (mBitrate >= 0) {
-        *bitrate = mBitrate;
-        return true;
-    }
-
-    *bitrate = 0;
-
-    return false;
-}
-
-// Returns true iff cached duration is available/applicable.
-bool AwesomePlayer::getCachedDuration_l(int64_t *durationUs, bool *eos) {
-    int64_t bitrate;
-
-    if (mCachedSource != NULL && getBitrate(&bitrate) && (bitrate > 0)) {
-        status_t finalStatus;
-        size_t cachedDataRemaining = mCachedSource->approxDataRemaining(&finalStatus);
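-        // Remaining cached bytes * 8 bits/byte * 1e6 us/s, divided by the
-        // stream bitrate, gives the microseconds of playback left in the cache.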
-        *durationUs = cachedDataRemaining * 8000000ll / bitrate;
-        *eos = (finalStatus != OK);
-        return true;
-    } else if (mWVMExtractor != NULL) {
-        status_t finalStatus;
-        *durationUs = mWVMExtractor->getCachedDurationUs(&finalStatus);
-        *eos = (finalStatus != OK);
-        return true;
-    }
-
-    return false;
-}
-
-void AwesomePlayer::ensureCacheIsFetching_l() {
-    if (mCachedSource != NULL) {
-        mCachedSource->resumeFetchingIfNecessary();
-    }
-}
-
-void AwesomePlayer::onVideoLagUpdate() {
-    Mutex::Autolock autoLock(mLock);
-    if (!mVideoLagEventPending) {
-        return;
-    }
-    mVideoLagEventPending = false;
-
-    int64_t audioTimeUs = mAudioPlayer->getMediaTimeUs();
-    int64_t videoLateByUs = audioTimeUs - mVideoTimeUs;
-
-    if (!(mFlags & VIDEO_AT_EOS) && videoLateByUs > 300000ll) {
-        ALOGV("video late by %lld ms.", videoLateByUs / 1000ll);
-
-        notifyListener_l(
-                MEDIA_INFO,
-                MEDIA_INFO_VIDEO_TRACK_LAGGING,
-                videoLateByUs / 1000ll);
-    }
-
-    postVideoLagEvent_l();
-}
-
-void AwesomePlayer::onBufferingUpdate() {
-    Mutex::Autolock autoLock(mLock);
-    if (!mBufferingEventPending) {
-        return;
-    }
-    mBufferingEventPending = false;
-
-    if (mCachedSource != NULL) {
-        status_t finalStatus;
-        size_t cachedDataRemaining = mCachedSource->approxDataRemaining(&finalStatus);
-        bool eos = (finalStatus != OK);
-
-        if (eos) {
-            if (finalStatus == ERROR_END_OF_STREAM) {
-                notifyListener_l(MEDIA_BUFFERING_UPDATE, 100);
-            }
-            if (mFlags & PREPARING) {
-                ALOGV("cache has reached EOS, prepare is done.");
-                finishAsyncPrepare_l();
-            }
-        } else {
-            bool eos2;
-            int64_t cachedDurationUs;
-            if (getCachedDuration_l(&cachedDurationUs, &eos2) && mDurationUs > 0) {
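-                // Report the buffered fraction of the whole clip, clamped to 100%.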
-                int percentage = 100.0 * (double)cachedDurationUs / mDurationUs;
-                if (percentage > 100) {
-                    percentage = 100;
-                }
-
-                notifyListener_l(MEDIA_BUFFERING_UPDATE, percentage);
-            } else {
-                // We don't know the bitrate/duration of the stream; use absolute size
-                // limits to maintain the cache.
-
-                if ((mFlags & PLAYING) && !eos
-                        && (cachedDataRemaining < kLowWaterMarkBytes)) {
-                    ALOGI("cache is running low (< %zu) , pausing.",
-                         kLowWaterMarkBytes);
-                    modifyFlags(CACHE_UNDERRUN, SET);
-                    pause_l();
-                    ensureCacheIsFetching_l();
-                    sendCacheStats();
-                    notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_START);
-                } else if (eos || cachedDataRemaining > kHighWaterMarkBytes) {
-                    if (mFlags & CACHE_UNDERRUN) {
-                        ALOGI("cache has filled up (> %zu), resuming.",
-                             kHighWaterMarkBytes);
-                        modifyFlags(CACHE_UNDERRUN, CLEAR);
-                        play_l();
-                    } else if (mFlags & PREPARING) {
-                        ALOGV("cache has filled up (> %zu), prepare is done",
-                             kHighWaterMarkBytes);
-                        finishAsyncPrepare_l();
-                    }
-                }
-            }
-        }
-    } else if (mWVMExtractor != NULL) {
-        status_t finalStatus;
-
-        int64_t cachedDurationUs
-            = mWVMExtractor->getCachedDurationUs(&finalStatus);
-
-        bool eos = (finalStatus != OK);
-
-        if (eos) {
-            if (finalStatus == ERROR_END_OF_STREAM) {
-                notifyListener_l(MEDIA_BUFFERING_UPDATE, 100);
-            }
-            if (mFlags & PREPARING) {
-                ALOGV("cache has reached EOS, prepare is done.");
-                finishAsyncPrepare_l();
-            }
-        } else {
-            int percentage = 100.0 * (double)cachedDurationUs / mDurationUs;
-            if (percentage > 100) {
-                percentage = 100;
-            }
-
-            notifyListener_l(MEDIA_BUFFERING_UPDATE, percentage);
-        }
-    }
-
-    int64_t cachedDurationUs;
-    bool eos;
-    if (getCachedDuration_l(&cachedDurationUs, &eos)) {
-        ALOGV("cachedDurationUs = %.2f secs, eos=%d",
-             cachedDurationUs / 1E6, eos);
-
-        if ((mFlags & PLAYING) && !eos
-                && (cachedDurationUs < kLowWaterMarkUs)) {
-            modifyFlags(CACHE_UNDERRUN, SET);
-            ALOGI("cache is running low (%.2f secs) , pausing.",
-                  cachedDurationUs / 1E6);
-            pause_l();
-            ensureCacheIsFetching_l();
-            sendCacheStats();
-            notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_START);
-        } else if (eos || cachedDurationUs > kHighWaterMarkUs) {
-            if (mFlags & CACHE_UNDERRUN) {
-                modifyFlags(CACHE_UNDERRUN, CLEAR);
-                ALOGI("cache has filled up (%.2f secs), resuming.",
-                      cachedDurationUs / 1E6);
-                play_l();
-            } else if (mFlags & PREPARING) {
-                ALOGV("cache has filled up (%.2f secs), prepare is done",
-                     cachedDurationUs / 1E6);
-                finishAsyncPrepare_l();
-            }
-        }
-    }
-
-    if (mFlags & (PLAYING | PREPARING | CACHE_UNDERRUN)) {
-        postBufferingEvent_l();
-    }
-}
-
-void AwesomePlayer::sendCacheStats() {
-    sp<MediaPlayerBase> listener = mListener.promote();
-    if (listener != NULL) {
-        int32_t kbps = 0;
-        status_t err = UNKNOWN_ERROR;
-        if (mCachedSource != NULL) {
-            err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
-        } else if (mWVMExtractor != NULL) {
-            err = mWVMExtractor->getEstimatedBandwidthKbps(&kbps);
-        }
-        if (err == OK) {
-            listener->sendEvent(
-                MEDIA_INFO, MEDIA_INFO_NETWORK_BANDWIDTH, kbps);
-        }
-    }
-}
-
-void AwesomePlayer::onStreamDone() {
-    // Posted whenever any stream finishes playing.
-    ATRACE_CALL();
-
-    Mutex::Autolock autoLock(mLock);
-    if (!mStreamDoneEventPending) {
-        return;
-    }
-    mStreamDoneEventPending = false;
-
-    if (mStreamDoneStatus != ERROR_END_OF_STREAM) {
-        ALOGV("MEDIA_ERROR %d", mStreamDoneStatus);
-
-        notifyListener_l(
-                MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, mStreamDoneStatus);
-
-        pause_l(true /* at eos */);
-
-        modifyFlags(AT_EOS, SET);
-        return;
-    }
-
-    const bool allDone =
-        (mVideoSource == NULL || (mFlags & VIDEO_AT_EOS))
-            && (mAudioSource == NULL || (mFlags & AUDIO_AT_EOS));
-
-    if (!allDone) {
-        return;
-    }
-
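-    // Don't auto-loop notification sounds; a looping notification would
-    // never stop ringing.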
-    if (mFlags & AUTO_LOOPING) {
-        audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
-        if (mAudioSink != NULL) {
-            streamType = mAudioSink->getAudioStreamType();
-        }
-        if (streamType == AUDIO_STREAM_NOTIFICATION) {
-            ALOGW("disabling auto-loop for notification");
-            modifyFlags(AUTO_LOOPING, CLEAR);
-        }
-    }
-    if ((mFlags & LOOPING)
-            || (mFlags & AUTO_LOOPING)) {
-
-        seekTo_l(0);
-
-        if (mVideoSource != NULL) {
-            postVideoEvent_l();
-        }
-    } else {
-        ALOGV("MEDIA_PLAYBACK_COMPLETE");
-        notifyListener_l(MEDIA_PLAYBACK_COMPLETE);
-
-        pause_l(true /* at eos */);
-
-        // If audio hasn't sent MEDIA_SEEK_COMPLETE yet, notify the
-        // observer of MEDIA_SEEK_COMPLETE immediately for state persistence.
-        if (mWatchForAudioSeekComplete) {
-            notifyListener_l(MEDIA_SEEK_COMPLETE);
-            mWatchForAudioSeekComplete = false;
-        }
-
-        modifyFlags(AT_EOS, SET);
-    }
-}
-
-status_t AwesomePlayer::play() {
-    ATRACE_CALL();
-
-    Mutex::Autolock autoLock(mLock);
-
-    modifyFlags(CACHE_UNDERRUN, CLEAR);
-
-    return play_l();
-}
-
-status_t AwesomePlayer::play_l() {
-    modifyFlags(SEEK_PREVIEW, CLEAR);
-
-    if (mFlags & PLAYING) {
-        return OK;
-    }
-
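-    // Arm the MEDIA_STARTED notification: notifyIfMediaStarted_l() fires
-    // (once) while the generations still match.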
-    mMediaRenderingStartGeneration = ++mStartGeneration;
-
-    if (!(mFlags & PREPARED)) {
-        status_t err = prepare_l();
-
-        if (err != OK) {
-            return err;
-        }
-    }
-
-    modifyFlags(PLAYING, SET);
-    modifyFlags(FIRST_FRAME, SET);
-
-    if (mDecryptHandle != NULL) {
-        int64_t position;
-        getPosition(&position);
-        mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
-                Playback::START, position / 1000);
-    }
-
-    if (mAudioSource != NULL) {
-        if (mAudioPlayer == NULL) {
-            createAudioPlayer_l();
-        }
-
-        CHECK(!(mFlags & AUDIO_RUNNING));
-
-        if (mVideoSource == NULL) {
-
-            // We don't want to post an error notification at this point;
-            // the error returned from MediaPlayer::start() will suffice.
-
-            status_t err = startAudioPlayer_l(
-                    false /* sendErrorNotification */);
-
-            if ((err != OK) && mOffloadAudio) {
-                ALOGI("play_l() cannot create offload output, fallback to sw decode");
-                int64_t curTimeUs;
-                getPosition(&curTimeUs);
-
-                delete mAudioPlayer;
-                mAudioPlayer = NULL;
-                // if the player was started it will take care of stopping the source when destroyed
-                if (!(mFlags & AUDIOPLAYER_STARTED)) {
-                    mAudioSource->stop();
-                }
-                modifyFlags((AUDIO_RUNNING | AUDIOPLAYER_STARTED), CLEAR);
-                mOffloadAudio = false;
-                mAudioSource = mOmxSource;
-                if (mAudioSource != NULL) {
-                    err = mAudioSource->start();
-
-                    if (err != OK) {
-                        mAudioSource.clear();
-                    } else {
-                        mSeekNotificationSent = true;
-                        if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
-                            seekTo_l(curTimeUs);
-                        }
-                        createAudioPlayer_l();
-                        err = startAudioPlayer_l(false);
-                    }
-                }
-            }
-
-            if (err != OK) {
-                delete mAudioPlayer;
-                mAudioPlayer = NULL;
-
-                modifyFlags((PLAYING | FIRST_FRAME), CLEAR);
-
-                if (mDecryptHandle != NULL) {
-                    mDrmManagerClient->setPlaybackStatus(
-                            mDecryptHandle, Playback::STOP, 0);
-                }
-
-                return err;
-            }
-        }
-
-        if (mAudioPlayer != NULL) {
-            mAudioPlayer->setPlaybackRate(mPlaybackSettings);
-        }
-    }
-
-    if (mTimeSource == NULL && mAudioPlayer == NULL) {
-        mTimeSource = &mSystemTimeSource;
-    }
-
-    if (mVideoSource != NULL) {
-        // Kick off video playback
-        postVideoEvent_l();
-
-        if (mAudioSource != NULL && mVideoSource != NULL) {
-            postVideoLagEvent_l();
-        }
-    }
-
-    if (mFlags & AT_EOS) {
-        // Legacy behaviour: if a stream finishes playing and then
-        // is started again, we play from the start...
-        seekTo_l(0);
-    }
-
-    uint32_t params = IMediaPlayerService::kBatteryDataCodecStarted
-        | IMediaPlayerService::kBatteryDataTrackDecoder;
-    if ((mAudioSource != NULL) && (mAudioSource != mAudioTrack)) {
-        params |= IMediaPlayerService::kBatteryDataTrackAudio;
-    }
-    if (mVideoSource != NULL) {
-        params |= IMediaPlayerService::kBatteryDataTrackVideo;
-    }
-    addBatteryData(params);
-
-    if (isStreamingHTTP()) {
-        postBufferingEvent_l();
-    }
-
-    return OK;
-}
-
-void AwesomePlayer::createAudioPlayer_l()
-{
-    uint32_t flags = 0;
-    int64_t cachedDurationUs;
-    bool eos;
-
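-    // Offloaded audio is decoded by the audio DSP; otherwise allow deep
-    // buffering (lower power, higher latency) for long audio-only content.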
-    if (mOffloadAudio) {
-        flags |= AudioPlayer::USE_OFFLOAD;
-    } else if (mVideoSource == NULL
-            && (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
-            (getCachedDuration_l(&cachedDurationUs, &eos) &&
-            cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
-        flags |= AudioPlayer::ALLOW_DEEP_BUFFERING;
-    }
-    if (isStreamingHTTP()) {
-        flags |= AudioPlayer::IS_STREAMING;
-    }
-    if (mVideoSource != NULL) {
-        flags |= AudioPlayer::HAS_VIDEO;
-    }
-
-    mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);
-    mAudioPlayer->setSource(mAudioSource);
-
-    mTimeSource = mAudioPlayer;
-
-    // If there was a seek request before we ever started,
-    // honor the request now.
-    // Make sure to do this before starting the audio player
-    // to avoid a race condition.
-    seekAudioIfNecessary_l();
-}
-
-void AwesomePlayer::notifyIfMediaStarted_l() {
-    if (mMediaRenderingStartGeneration == mStartGeneration) {
-        mMediaRenderingStartGeneration = -1;
-        notifyListener_l(MEDIA_STARTED);
-    }
-}
-
-status_t AwesomePlayer::startAudioPlayer_l(bool sendErrorNotification) {
-    CHECK(!(mFlags & AUDIO_RUNNING));
-    status_t err = OK;
-
-    if (mAudioSource == NULL || mAudioPlayer == NULL) {
-        return OK;
-    }
-
-    if (mOffloadAudio) {
-        mQueue.cancelEvent(mAudioTearDownEvent->eventID());
-        mAudioTearDownEventPending = false;
-    }
-
-    if (!(mFlags & AUDIOPLAYER_STARTED)) {
-        bool wasSeeking = mAudioPlayer->isSeeking();
-
-        // We've already started the MediaSource in order to enable
-        // the prefetcher to read its data.
-        err = mAudioPlayer->start(
-                true /* sourceAlreadyStarted */);
-
-        if (err != OK) {
-            if (sendErrorNotification) {
-                notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
-            }
-
-            return err;
-        }
-
-        modifyFlags(AUDIOPLAYER_STARTED, SET);
-
-        if (wasSeeking) {
-            CHECK(!mAudioPlayer->isSeeking());
-
-            // We will have finished the seek while starting the audio player.
-            postAudioSeekComplete();
-        } else {
-            notifyIfMediaStarted_l();
-        }
-    } else {
-        err = mAudioPlayer->resume();
-    }
-
-    if (err == OK) {
-        err = mAudioPlayer->setPlaybackRate(mPlaybackSettings);
-    }
-
-    if (err == OK) {
-        modifyFlags(AUDIO_RUNNING, SET);
-
-        mWatchForAudioEOS = true;
-    }
-
-    return err;
-}
-
-void AwesomePlayer::notifyVideoSize_l() {
-    ATRACE_CALL();
-    sp<MetaData> meta = mVideoSource->getFormat();
-
-    int32_t cropLeft, cropTop, cropRight, cropBottom;
-    if (!meta->findRect(
-                kKeyCropRect, &cropLeft, &cropTop, &cropRight, &cropBottom)) {
-        int32_t width, height;
-        CHECK(meta->findInt32(kKeyWidth, &width));
-        CHECK(meta->findInt32(kKeyHeight, &height));
-
-        cropLeft = cropTop = 0;
-        cropRight = width - 1;
-        cropBottom = height - 1;
-
-        ALOGV("got dimensions only %d x %d", width, height);
-    } else {
-        ALOGV("got crop rect %d, %d, %d, %d",
-             cropLeft, cropTop, cropRight, cropBottom);
-    }
-
-    int32_t displayWidth;
-    if (meta->findInt32(kKeyDisplayWidth, &displayWidth)) {
-        ALOGV("Display width changed (%d=>%d)", mDisplayWidth, displayWidth);
-        mDisplayWidth = displayWidth;
-    }
-    int32_t displayHeight;
-    if (meta->findInt32(kKeyDisplayHeight, &displayHeight)) {
-        ALOGV("Display height changed (%d=>%d)", mDisplayHeight, displayHeight);
-        mDisplayHeight = displayHeight;
-    }
-
-    int32_t usableWidth = cropRight - cropLeft + 1;
-    int32_t usableHeight = cropBottom - cropTop + 1;
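-    // Explicit display dimensions from the track metadata override the
-    // crop-derived size (e.g. for anamorphic content).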
-    if (mDisplayWidth != 0) {
-        usableWidth = mDisplayWidth;
-    }
-    if (mDisplayHeight != 0) {
-        usableHeight = mDisplayHeight;
-    }
-
-    {
-        Mutex::Autolock autoLock(mStatsLock);
-        mStats.mVideoWidth = usableWidth;
-        mStats.mVideoHeight = usableHeight;
-    }
-
-    int32_t rotationDegrees;
-    if (!mVideoTrack->getFormat()->findInt32(
-                kKeyRotation, &rotationDegrees)) {
-        rotationDegrees = 0;
-    }
-
-    if (rotationDegrees == 90 || rotationDegrees == 270) {
-        notifyListener_l(
-                MEDIA_SET_VIDEO_SIZE, usableHeight, usableWidth);
-    } else {
-        notifyListener_l(
-                MEDIA_SET_VIDEO_SIZE, usableWidth, usableHeight);
-    }
-}
-
-void AwesomePlayer::initRenderer_l() {
-    ATRACE_CALL();
-
-    if (mNativeWindow == NULL) {
-        return;
-    }
-
-    sp<MetaData> meta = mVideoSource->getFormat();
-
-    int32_t format;
-    const char *component;
-    int32_t decodedWidth, decodedHeight;
-    CHECK(meta->findInt32(kKeyColorFormat, &format));
-    CHECK(meta->findCString(kKeyDecoderComponent, &component));
-    CHECK(meta->findInt32(kKeyWidth, &decodedWidth));
-    CHECK(meta->findInt32(kKeyHeight, &decodedHeight));
-
-    int32_t rotationDegrees;
-    if (!mVideoTrack->getFormat()->findInt32(
-                kKeyRotation, &rotationDegrees)) {
-        rotationDegrees = 0;
-    }
-
-    mVideoRenderer.clear();
-
-    // Must ensure that mVideoRenderer's destructor is actually executed
-    // before creating a new one.
-    IPCThreadState::self()->flushCommands();
-
-    // Even if setting the scaling mode fails, we will continue anyway.
-    setVideoScalingMode_l(mVideoScalingMode);
-    if (USE_SURFACE_ALLOC
-            && !strncmp(component, "OMX.", 4)
-            && strncmp(component, "OMX.google.", 11)) {
-        // Hardware decoders avoid the CPU color conversion by decoding
-        // directly to ANativeBuffers, so we must use a renderer that
-        // just pushes those buffers to the ANativeWindow.
-        mVideoRenderer =
-            new AwesomeNativeWindowRenderer(mNativeWindow, rotationDegrees);
-    } else {
-        // Other decoders are instantiated locally and as a consequence
-        // allocate their buffers in local address space.  This renderer
-        // then performs a color conversion and copy to get the data
-        // into the ANativeBuffer.
-        sp<AMessage> format;
-        convertMetaDataToMessage(meta, &format);
-        mVideoRenderer = new AwesomeLocalRenderer(mNativeWindow, format);
-    }
-}
-
-status_t AwesomePlayer::pause() {
-    ATRACE_CALL();
-
-    Mutex::Autolock autoLock(mLock);
-
-    modifyFlags(CACHE_UNDERRUN, CLEAR);
-
-    return pause_l();
-}
-
-status_t AwesomePlayer::pause_l(bool at_eos) {
-    if (!(mFlags & PLAYING)) {
-        if (mAudioTearDown && mAudioTearDownWasPlaying) {
-            ALOGV("pause_l() during teardown and finishSetDataSource_l() mFlags %x" , mFlags);
-            mAudioTearDownWasPlaying = false;
-            notifyListener_l(MEDIA_PAUSED);
-            mMediaRenderingStartGeneration = ++mStartGeneration;
-        }
-        return OK;
-    }
-
-    notifyListener_l(MEDIA_PAUSED);
-    mMediaRenderingStartGeneration = ++mStartGeneration;
-
-    cancelPlayerEvents(true /* keepNotifications */);
-
-    if (mAudioPlayer != NULL && (mFlags & AUDIO_RUNNING)) {
-        // If we played the audio stream to completion we
-        // want to make sure that all samples remaining in the audio
-        // track's queue are played out.
-        mAudioPlayer->pause(at_eos /* playPendingSamples */);
-        // send us a reminder to tear down the AudioPlayer if paused for too long.
-        if (mOffloadAudio) {
-            postAudioTearDownEvent(kOffloadPauseMaxUs);
-        }
-        modifyFlags(AUDIO_RUNNING, CLEAR);
-    }
-
-    if (mFlags & TEXTPLAYER_INITIALIZED) {
-        mTextDriver->pause();
-        modifyFlags(TEXT_RUNNING, CLEAR);
-    }
-
-    modifyFlags(PLAYING, CLEAR);
-
-    if (mDecryptHandle != NULL) {
-        mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
-                Playback::PAUSE, 0);
-    }
-
-    uint32_t params = IMediaPlayerService::kBatteryDataTrackDecoder;
-    if ((mAudioSource != NULL) && (mAudioSource != mAudioTrack)) {
-        params |= IMediaPlayerService::kBatteryDataTrackAudio;
-    }
-    if (mVideoSource != NULL) {
-        params |= IMediaPlayerService::kBatteryDataTrackVideo;
-    }
-
-    addBatteryData(params);
-
-    return OK;
-}
-
-bool AwesomePlayer::isPlaying() const {
-    return (mFlags & PLAYING) || (mFlags & CACHE_UNDERRUN);
-}
-
-status_t AwesomePlayer::setSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer) {
-    Mutex::Autolock autoLock(mLock);
-
-    status_t err;
-    if (bufferProducer != NULL) {
-        err = setNativeWindow_l(new Surface(bufferProducer));
-    } else {
-        err = setNativeWindow_l(NULL);
-    }
-
-    return err;
-}
-
-void AwesomePlayer::shutdownVideoDecoder_l() {
-    if (mVideoBuffer) {
-        mVideoBuffer->release();
-        mVideoBuffer = NULL;
-    }
-
-    mVideoSource->stop();
-
-    // The following hack is necessary to ensure that the OMX
-    // component is completely released by the time we may try
-    // to instantiate it again.
-    wp<MediaSource> tmp = mVideoSource;
-    mVideoSource.clear();
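-    // promote() succeeds only while strong references remain; once it
-    // returns NULL, the decoder has actually been destroyed.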
-    while (tmp.promote() != NULL) {
-        usleep(1000);
-    }
-    IPCThreadState::self()->flushCommands();
-    ALOGV("video decoder shutdown completed");
-}
-
-status_t AwesomePlayer::setNativeWindow_l(const sp<ANativeWindow> &native) {
-    mNativeWindow = native;
-
-    if (mVideoSource == NULL) {
-        return OK;
-    }
-
-    ALOGV("attempting to reconfigure to use new surface");
-
-    bool wasPlaying = (mFlags & PLAYING) != 0;
-
-    pause_l();
-    mVideoRenderer.clear();
-
-    shutdownVideoDecoder_l();
-
-    status_t err = initVideoDecoder();
-
-    if (err != OK) {
-        ALOGE("failed to reinstantiate video decoder after surface change.");
-        return err;
-    }
-
-    if (mLastVideoTimeUs >= 0) {
-        mSeeking = SEEK;
-        mSeekTimeUs = mLastVideoTimeUs;
-        modifyFlags((AT_EOS | AUDIO_AT_EOS | VIDEO_AT_EOS), CLEAR);
-    }
-
-    if (wasPlaying) {
-        play_l();
-    }
-
-    return OK;
-}
-
-void AwesomePlayer::setAudioSink(
-        const sp<MediaPlayerBase::AudioSink> &audioSink) {
-    Mutex::Autolock autoLock(mLock);
-
-    mAudioSink = audioSink;
-}
-
-status_t AwesomePlayer::setLooping(bool shouldLoop) {
-    Mutex::Autolock autoLock(mLock);
-
-    modifyFlags(LOOPING, CLEAR);
-
-    if (shouldLoop) {
-        modifyFlags(LOOPING, SET);
-    }
-
-    return OK;
-}
-
-status_t AwesomePlayer::getDuration(int64_t *durationUs) {
-    Mutex::Autolock autoLock(mMiscStateLock);
-
-    if (mDurationUs < 0) {
-        return UNKNOWN_ERROR;
-    }
-
-    *durationUs = mDurationUs;
-
-    return OK;
-}
-
-status_t AwesomePlayer::getPosition(int64_t *positionUs) {
-    if (mSeeking != NO_SEEK) {
-        *positionUs = mSeekTimeUs;
-    } else if (mVideoSource != NULL
-            && (mAudioPlayer == NULL || !(mFlags & VIDEO_AT_EOS))) {
-        Mutex::Autolock autoLock(mMiscStateLock);
-        *positionUs = mVideoTimeUs;
-    } else if (mAudioPlayer != NULL) {
-        *positionUs = mAudioPlayer->getMediaTimeUs();
-    } else {
-        *positionUs = 0;
-    }
-    return OK;
-}
-
-status_t AwesomePlayer::seekTo(int64_t timeUs) {
-    ATRACE_CALL();
-
-    if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
-        Mutex::Autolock autoLock(mLock);
-        return seekTo_l(timeUs);
-    }
-
-    return OK;
-}
-
-status_t AwesomePlayer::seekTo_l(int64_t timeUs) {
-    if (mFlags & CACHE_UNDERRUN) {
-        modifyFlags(CACHE_UNDERRUN, CLEAR);
-        play_l();
-    }
-
-    if ((mFlags & PLAYING) && mVideoSource != NULL && (mFlags & VIDEO_AT_EOS)) {
-        // Video playback completed before, there's no pending
-        // video event right now. In order for this new seek
-        // to be honored, we need to post one.
-
-        postVideoEvent_l();
-    }
-
-    mSeeking = SEEK;
-    mSeekNotificationSent = false;
-    mSeekTimeUs = timeUs;
-    modifyFlags((AT_EOS | AUDIO_AT_EOS | VIDEO_AT_EOS), CLEAR);
-
-    if (mFlags & PLAYING) {
-        notifyListener_l(MEDIA_PAUSED);
-        mMediaRenderingStartGeneration = ++mStartGeneration;
-    }
-
-    seekAudioIfNecessary_l();
-
-    if (mFlags & TEXTPLAYER_INITIALIZED) {
-        mTextDriver->seekToAsync(mSeekTimeUs);
-    }
-
-    if (!(mFlags & PLAYING)) {
-        ALOGV("seeking while paused, sending SEEK_COMPLETE notification"
-             " immediately.");
-
-        notifyListener_l(MEDIA_SEEK_COMPLETE);
-        mSeekNotificationSent = true;
-
-        if ((mFlags & PREPARED) && mVideoSource != NULL) {
-            modifyFlags(SEEK_PREVIEW, SET);
-            postVideoEvent_l();
-        }
-    }
-
-    return OK;
-}
-
-void AwesomePlayer::seekAudioIfNecessary_l() {
-    if (mSeeking != NO_SEEK && mVideoSource == NULL && mAudioPlayer != NULL) {
-        mAudioPlayer->seekTo(mSeekTimeUs);
-
-        mWatchForAudioSeekComplete = true;
-        mWatchForAudioEOS = true;
-
-        if (mDecryptHandle != NULL) {
-            mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
-                    Playback::PAUSE, 0);
-            mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
-                    Playback::START, mSeekTimeUs / 1000);
-        }
-    }
-}
-
-void AwesomePlayer::setAudioSource(sp<MediaSource> source) {
-    CHECK(source != NULL);
-
-    mAudioTrack = source;
-}
-
-void AwesomePlayer::addTextSource_l(size_t trackIndex, const sp<MediaSource>& source) {
-    CHECK(source != NULL);
-
-    if (mTextDriver == NULL) {
-        mTextDriver = new TimedTextDriver(mListener, mHTTPService);
-    }
-
-    mTextDriver->addInBandTextSource(trackIndex, source);
-}
-
-status_t AwesomePlayer::initAudioDecoder() {
-    ATRACE_CALL();
-
-    sp<MetaData> meta = mAudioTrack->getFormat();
-
-    const char *mime;
-    CHECK(meta->findCString(kKeyMIMEType, &mime));
-    // Check whether there is a hardware codec for this stream.
-    // This doesn't guarantee that the hardware has a free stream,
-    // but it avoids us attempting to open (and re-open) an offload
-    // stream to hardware that doesn't have the necessary codec.
-    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
-    if (mAudioSink != NULL) {
-        streamType = mAudioSink->getAudioStreamType();
-    }
-
-    mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL),
-                                     isStreamingHTTP(), streamType);
-
-    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
-        ALOGV("createAudioPlayer: bypass OMX (raw)");
-        mAudioSource = mAudioTrack;
-    } else {
-        // If offloading, we still create an OMX decoder as a fallback,
-        // but we don't start it.
-        mOmxSource = OMXCodec::Create(
-                mClient.interface(), mAudioTrack->getFormat(),
-                false, // createEncoder
-                mAudioTrack);
-
-        if (mOffloadAudio) {
-            ALOGV("createAudioPlayer: bypass OMX (offload)");
-            mAudioSource = mAudioTrack;
-        } else {
-            mAudioSource = mOmxSource;
-        }
-    }
-
-    if (mAudioSource != NULL) {
-        int64_t durationUs;
-        if (mAudioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
-            Mutex::Autolock autoLock(mMiscStateLock);
-            if (mDurationUs < 0 || durationUs > mDurationUs) {
-                mDurationUs = durationUs;
-            }
-        }
-
-        status_t err = mAudioSource->start();
-
-        if (err != OK) {
-            mAudioSource.clear();
-            mOmxSource.clear();
-            return err;
-        }
-    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
-        // For legacy reasons we're simply going to ignore the absence
-        // of an audio decoder for QCELP instead of aborting playback
-        // altogether.
-        return OK;
-    }
-
-    if (mAudioSource != NULL) {
-        Mutex::Autolock autoLock(mStatsLock);
-        TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
-        const char *component;
-        if (!mAudioSource->getFormat()
-                ->findCString(kKeyDecoderComponent, &component)) {
-            component = "none";
-        }
-
-        stat->mDecoderName = component;
-    }
-
-    return mAudioSource != NULL ? OK : UNKNOWN_ERROR;
-}
-
-void AwesomePlayer::setVideoSource(sp<MediaSource> source) {
-    CHECK(source != NULL);
-
-    mVideoTrack = source;
-}
-
-status_t AwesomePlayer::initVideoDecoder(uint32_t flags) {
-    ATRACE_CALL();
-
-    // Either the application or the DRM system can independently say
-    // that there must be a hardware-protected path to an external video sink.
-    // For now we always require a hardware-protected path to external video sink
-    // if content is DRMed, but eventually this could be optional per DRM agent.
-    // When the application wants protection, then
-    //   (USE_SURFACE_ALLOC && (mSurface != 0) &&
-    //   (mSurface->getFlags() & ISurfaceComposer::eProtectedByApp))
-    // will be true, but that part is already handled by SurfaceFlinger.
-
-#ifdef DEBUG_HDCP
-    // For debugging, we allow a system property to control the protected usage.
-    // In case of uninitialized or unexpected property, we default to "DRM only".
-    bool setProtectionBit = false;
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("persist.sys.hdcp_checking", value, NULL)) {
-        if (!strcmp(value, "never")) {
-            // nop
-        } else if (!strcmp(value, "always")) {
-            setProtectionBit = true;
-        } else if (!strcmp(value, "drm-only")) {
-            if (mDecryptHandle != NULL) {
-                setProtectionBit = true;
-            }
-        // The property value is empty or unexpected.
-        } else {
-            if (mDecryptHandle != NULL) {
-                setProtectionBit = true;
-            }
-        }
-    // can't read the property value
-    } else {
-        if (mDecryptHandle != NULL) {
-            setProtectionBit = true;
-        }
-    }
-    // note that usage bit is already cleared, so no need to clear it in the "else" case
-    if (setProtectionBit) {
-        flags |= OMXCodec::kEnableGrallocUsageProtected;
-    }
-#else
-    if (mDecryptHandle != NULL) {
-        flags |= OMXCodec::kEnableGrallocUsageProtected;
-    }
-#endif
-    ALOGV("initVideoDecoder flags=0x%x", flags);
-    mVideoSource = OMXCodec::Create(
-            mClient.interface(), mVideoTrack->getFormat(),
-            false, // createEncoder
-            mVideoTrack,
-            NULL, flags, USE_SURFACE_ALLOC ? mNativeWindow : NULL);
-
-    if (mVideoSource != NULL) {
-        int64_t durationUs;
-        if (mVideoTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
-            Mutex::Autolock autoLock(mMiscStateLock);
-            if (mDurationUs < 0 || durationUs > mDurationUs) {
-                mDurationUs = durationUs;
-            }
-        }
-
-        status_t err = mVideoSource->start();
-
-        if (err != OK) {
-            ALOGE("failed to start video source");
-            mVideoSource.clear();
-            return err;
-        }
-    }
-
-    if (mVideoSource != NULL) {
-        const char *componentName;
-        CHECK(mVideoSource->getFormat()
-                ->findCString(kKeyDecoderComponent, &componentName));
-
-        {
-            Mutex::Autolock autoLock(mStatsLock);
-            TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mVideoTrackIndex);
-
-            stat->mDecoderName = componentName;
-        }
-
-        static const char *kPrefix = "OMX.Nvidia.";
-        static const char *kSuffix = ".decode";
-        static const size_t kSuffixLength = strlen(kSuffix);
-
-        size_t componentNameLength = strlen(componentName);
-
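-        // Matches component names of the form "OMX.Nvidia.<codec>.decode".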
-        if (!strncmp(componentName, kPrefix, strlen(kPrefix))
-                && componentNameLength >= kSuffixLength
-                && !strcmp(&componentName[
-                    componentNameLength - kSuffixLength], kSuffix)) {
-            modifyFlags(SLOW_DECODER_HACK, SET);
-        }
-    }
-
-    return mVideoSource != NULL ? OK : UNKNOWN_ERROR;
-}
-
-void AwesomePlayer::finishSeekIfNecessary(int64_t videoTimeUs) {
-    ATRACE_CALL();
-
-    if (mSeeking == SEEK_VIDEO_ONLY) {
-        mSeeking = NO_SEEK;
-        return;
-    }
-
-    if (mSeeking == NO_SEEK || (mFlags & SEEK_PREVIEW)) {
-        return;
-    }
-
-    // If we paused, then seeked, then resumed, it is possible that we have
-    // signaled SEEK_COMPLETE at a completely different media time than where
-    // we are now resuming.  Signal the new position to the media time provider.
-    // We cannot signal another SEEK_COMPLETE, as existing clients may not expect
-    // multiple SEEK_COMPLETE responses to a single seek() request.
-    if (mSeekNotificationSent && llabs((long long)(mSeekTimeUs - videoTimeUs)) > 10000) {
-        // notify if we are resuming more than 10ms away from desired seek time
-        notifyListener_l(MEDIA_SKIPPED);
-    }
-
-    if (mAudioPlayer != NULL) {
-        ALOGV("seeking audio to %" PRId64 " us (%.2f secs).", videoTimeUs, videoTimeUs / 1E6);
-
-        // If we don't have a video time, seek audio to the originally
-        // requested seek time instead.
-
-        mAudioPlayer->seekTo(videoTimeUs < 0 ? mSeekTimeUs : videoTimeUs);
-        mWatchForAudioSeekComplete = true;
-        mWatchForAudioEOS = true;
-    } else if (!mSeekNotificationSent) {
-        // If we're playing video only, report seek complete now,
-        // otherwise audio player will notify us later.
-        notifyListener_l(MEDIA_SEEK_COMPLETE);
-        mSeekNotificationSent = true;
-    }
-
-    modifyFlags(FIRST_FRAME, SET);
-    mSeeking = NO_SEEK;
-
-    if (mDecryptHandle != NULL) {
-        mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
-                Playback::PAUSE, 0);
-        mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
-                Playback::START, videoTimeUs / 1000);
-    }
-}
-
-void AwesomePlayer::onVideoEvent() {
-    ATRACE_CALL();
-    Mutex::Autolock autoLock(mLock);
-    if (!mVideoEventPending) {
-        // The event was cancelled in reset_l(), but it had already been
-        // scheduled for execution at that time.
-        return;
-    }
-    mVideoEventPending = false;
-
-    if (mSeeking != NO_SEEK) {
-        if (mVideoBuffer) {
-            mVideoBuffer->release();
-            mVideoBuffer = NULL;
-        }
-
-        if (mSeeking == SEEK && isStreamingHTTP() && mAudioSource != NULL
-                && !(mFlags & SEEK_PREVIEW)) {
-            // We're going to seek the video source first, followed by
-            // the audio source.
-            // In order to avoid jumps in the DataSource offset caused by
-            // the audio codec prefetching data from the old locations
-            // while the video codec is already reading data from the new
-            // locations, we'll "pause" the audio source, causing it to
-            // stop reading input data until a subsequent seek.
-
-            if (mAudioPlayer != NULL && (mFlags & AUDIO_RUNNING)) {
-                mAudioPlayer->pause();
-
-                modifyFlags(AUDIO_RUNNING, CLEAR);
-            }
-            mAudioSource->pause();
-        }
-    }
-
-    if (!mVideoBuffer) {
-        MediaSource::ReadOptions options;
-        if (mSeeking != NO_SEEK) {
-            ALOGV("seeking to %" PRId64 " us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6);
-
-            options.setSeekTo(
-                    mSeekTimeUs,
-                    mSeeking == SEEK_VIDEO_ONLY
-                        ? MediaSource::ReadOptions::SEEK_NEXT_SYNC
-                        : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
-        }
-        for (;;) {
-            status_t err = mVideoSource->read(&mVideoBuffer, &options);
-            options.clearSeekTo();
-
-            if (err != OK) {
-                CHECK(mVideoBuffer == NULL);
-
-                if (err == INFO_FORMAT_CHANGED) {
-                    ALOGV("VideoSource signalled format change.");
-
-                    notifyVideoSize_l();
-
-                    if (mVideoRenderer != NULL) {
-                        mVideoRendererIsPreview = false;
-                        initRenderer_l();
-                    }
-                    continue;
-                }
-
-                // So video playback is complete, but we may still have
-                // a seek request pending that needs to be applied
-                // to the audio track.
-                if (mSeeking != NO_SEEK) {
-                    ALOGV("video stream ended while seeking!");
-                }
-                finishSeekIfNecessary(-1);
-
-                if (mAudioPlayer != NULL
-                        && !(mFlags & (AUDIO_RUNNING | SEEK_PREVIEW))) {
-                    startAudioPlayer_l();
-                }
-
-                modifyFlags(VIDEO_AT_EOS, SET);
-                postStreamDoneEvent_l(err);
-                return;
-            }
-
-            if (mVideoBuffer->range_length() == 0) {
-                // Some decoders, notably the PV AVC software decoder,
-                // return spurious empty buffers that we just want to ignore.
-
-                mVideoBuffer->release();
-                mVideoBuffer = NULL;
-                continue;
-            }
-
-            break;
-        }
-
-        {
-            Mutex::Autolock autoLock(mStatsLock);
-            ++mStats.mNumVideoFramesDecoded;
-        }
-    }
-
-    int64_t timeUs;
-    CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
-
-    mLastVideoTimeUs = timeUs;
-
-    if (mSeeking == SEEK_VIDEO_ONLY) {
-        if (mSeekTimeUs > timeUs) {
-            ALOGI("XXX mSeekTimeUs = %" PRId64 " us, timeUs = %" PRId64 " us",
-                 mSeekTimeUs, timeUs);
-        }
-    }
-
-    {
-        Mutex::Autolock autoLock(mMiscStateLock);
-        mVideoTimeUs = timeUs;
-    }
-
-    SeekType wasSeeking = mSeeking;
-    finishSeekIfNecessary(timeUs);
-
-    if (mAudioPlayer != NULL && !(mFlags & (AUDIO_RUNNING | SEEK_PREVIEW))) {
-        status_t err = startAudioPlayer_l();
-        if (err != OK) {
-            ALOGE("Starting the audio player failed w/ err %d", err);
-            return;
-        }
-    }
-
-    if ((mFlags & TEXTPLAYER_INITIALIZED)
-            && !(mFlags & (TEXT_RUNNING | SEEK_PREVIEW))) {
-        mTextDriver->start();
-        modifyFlags(TEXT_RUNNING, SET);
-    }
-
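-    // Fall back to the system clock when audio has ended or the audio
-    // player never started; the audio clock is unusable in those states.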
-    TimeSource *ts =
-        ((mFlags & AUDIO_AT_EOS) || !(mFlags & AUDIOPLAYER_STARTED))
-            ? &mSystemTimeSource : mTimeSource;
-    int64_t systemTimeUs = mSystemTimeSource.getRealTimeUs();
-    int64_t looperTimeUs = ALooper::GetNowUs();
-
-    if (mFlags & FIRST_FRAME) {
-        modifyFlags(FIRST_FRAME, CLEAR);
-        mSinceLastDropped = 0;
-        mClockEstimator->reset();
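-        // Anchor the clock mapping: realTime ~= mediaTime + mTimeSourceDeltaUs.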
-        mTimeSourceDeltaUs = estimateRealTimeUs(ts, systemTimeUs) - timeUs;
-    }
-
-    int64_t realTimeUs, mediaTimeUs;
-    if (!(mFlags & AUDIO_AT_EOS) && mAudioPlayer != NULL
-        && mAudioPlayer->getMediaTimeMapping(&realTimeUs, &mediaTimeUs)) {
-        ALOGV("updating TSdelta (%" PRId64 " => %" PRId64 " change %" PRId64 ")",
-              mTimeSourceDeltaUs, realTimeUs - mediaTimeUs,
-              mTimeSourceDeltaUs - (realTimeUs - mediaTimeUs));
-        ATRACE_INT("TS delta change (ms)", (mTimeSourceDeltaUs - (realTimeUs - mediaTimeUs)) / 1E3);
-        mTimeSourceDeltaUs = realTimeUs - mediaTimeUs;
-    }
-
-    if (wasSeeking == SEEK_VIDEO_ONLY) {
-        int64_t nowUs = estimateRealTimeUs(ts, systemTimeUs) - mTimeSourceDeltaUs;
-
-        int64_t latenessUs = nowUs - timeUs;
-
-        ATRACE_INT("Video Lateness (ms)", latenessUs / 1E3);
-
-        if (latenessUs > 0) {
-            ALOGI("after SEEK_VIDEO_ONLY we're late by %.2f secs", latenessUs / 1E6);
-        }
-    }
-
-    int64_t latenessUs = 0;
-    if (wasSeeking == NO_SEEK) {
-        // Let's display the first frame after seeking right away.
-
-        int64_t nowUs = estimateRealTimeUs(ts, systemTimeUs) - mTimeSourceDeltaUs;
-
-        latenessUs = nowUs - timeUs;
-
-        ATRACE_INT("Video Lateness (ms)", latenessUs / 1E3);
-
-        if (latenessUs > 500000ll
-                && mAudioPlayer != NULL
-                && mAudioPlayer->getMediaTimeMapping(
-                    &realTimeUs, &mediaTimeUs)) {
-            if (mWVMExtractor == NULL) {
-                ALOGI("we're much too late (%.2f secs), video skipping ahead",
-                     latenessUs / 1E6);
-
-                mVideoBuffer->release();
-                mVideoBuffer = NULL;
-
-                mSeeking = SEEK_VIDEO_ONLY;
-                mSeekTimeUs = mediaTimeUs;
-
-                postVideoEvent_l();
-                return;
-            } else {
-                // The widevine extractor doesn't deal well with seeking
-                // audio and video independently. We'll just have to wait
-                // until the decoder catches up, which won't be long at all.
-                ALOGI("we're very late (%.2f secs)", latenessUs / 1E6);
-            }
-        }
-
-        if (latenessUs > 40000) {
-            // We're more than 40ms late.
-            ALOGV("we're late by %" PRId64 " us (%.2f secs)",
-                 latenessUs, latenessUs / 1E6);
-
-            if (!(mFlags & SLOW_DECODER_HACK)
-                    || mSinceLastDropped > FRAME_DROP_FREQ) {
-                ALOGV("we're late by %" PRId64 " us (%.2f secs) dropping "
-                     "one after %d frames",
-                     latenessUs, latenessUs / 1E6, mSinceLastDropped);
-
-                mSinceLastDropped = 0;
-                mVideoBuffer->release();
-                mVideoBuffer = NULL;
-
-                {
-                    Mutex::Autolock autoLock(mStatsLock);
-                    ++mStats.mNumVideoFramesDropped;
-                }
-
-                postVideoEvent_l(0);
-                return;
-            }
-        }
-
-        if (latenessUs < -30000) {
-            // We're more than 30 ms early. If we're more than 60 ms early,
-            // check again in 30 ms; otherwise wake up ~20 ms before the
-            // frame is due.
-            postVideoEvent_l(latenessUs < -60000 ? 30000 : -latenessUs - 20000);
-            return;
-        }
-    }
-
-    if ((mNativeWindow != NULL)
-            && (mVideoRendererIsPreview || mVideoRenderer == NULL)) {
-        mVideoRendererIsPreview = false;
-
-        initRenderer_l();
-    }
-
-    if (mVideoRenderer != NULL) {
-        mSinceLastDropped++;
-        mVideoBuffer->meta_data()->setInt64(kKeyTime, looperTimeUs - latenessUs);
-
-        mVideoRenderer->render(mVideoBuffer);
-        if (!mVideoRenderingStarted) {
-            mVideoRenderingStarted = true;
-            notifyListener_l(MEDIA_INFO, MEDIA_INFO_RENDERING_START);
-        }
-
-        if (mFlags & PLAYING) {
-            notifyIfMediaStarted_l();
-        }
-    }
-
-    mVideoBuffer->release();
-    mVideoBuffer = NULL;
-
-    if (wasSeeking != NO_SEEK && (mFlags & SEEK_PREVIEW)) {
-        modifyFlags(SEEK_PREVIEW, CLEAR);
-        return;
-    }
-
-    /* get next frame time */
-    if (wasSeeking == NO_SEEK) {
-        MediaSource::ReadOptions options;
-        for (;;) {
-            status_t err = mVideoSource->read(&mVideoBuffer, &options);
-            if (err != OK) {
-                // deal with any errors next time
-                CHECK(mVideoBuffer == NULL);
-                postVideoEvent_l(0);
-                return;
-            }
-
-            if (mVideoBuffer->range_length() != 0) {
-                break;
-            }
-
-            // Some decoders, notably the PV AVC software decoder,
-            // return spurious empty buffers that we just want to ignore.
-
-            mVideoBuffer->release();
-            mVideoBuffer = NULL;
-        }
-
-        {
-            Mutex::Autolock autoLock(mStatsLock);
-            ++mStats.mNumVideoFramesDecoded;
-        }
-
-        int64_t nextTimeUs;
-        CHECK(mVideoBuffer->meta_data()->findInt64(kKeyTime, &nextTimeUs));
-        systemTimeUs = mSystemTimeSource.getRealTimeUs();
-        int64_t delayUs = nextTimeUs - estimateRealTimeUs(ts, systemTimeUs) + mTimeSourceDeltaUs;
-        ATRACE_INT("Frame delta (ms)", (nextTimeUs - timeUs) / 1E3);
-        ALOGV("next frame in %" PRId64, delayUs);
-        // Try to wake up ~30 ms before the frame is due: if it's more than
-        // 60 ms away, re-check in 30 ms; if it's less than 30 ms away,
-        // process it immediately.
-        postVideoEvent_l(delayUs > 60000 ? 30000 : (delayUs < 30000 ? 0 : delayUs - 30000));
-        return;
-    }
-
-    postVideoEvent_l();
-}
-
-int64_t AwesomePlayer::estimateRealTimeUs(TimeSource *ts, int64_t systemTimeUs) {
-    if (ts == &mSystemTimeSource) {
-        return systemTimeUs;
-    } else {
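-        // Smooth jitter in the time source by fitting a line over recent
-        // (systemTime, mediaClock) sample pairs.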
-        return (int64_t)mClockEstimator->estimate(systemTimeUs, ts->getRealTimeUs());
-    }
-}
-
-void AwesomePlayer::postVideoEvent_l(int64_t delayUs) {
-    ATRACE_CALL();
-
-    if (mVideoEventPending) {
-        return;
-    }
-
-    mVideoEventPending = true;
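-    // A negative delay selects the default 10 ms poll interval.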
-    mQueue.postEventWithDelay(mVideoEvent, delayUs < 0 ? 10000 : delayUs);
-}
-
-void AwesomePlayer::postStreamDoneEvent_l(status_t status) {
-    if (mStreamDoneEventPending) {
-        return;
-    }
-    mStreamDoneEventPending = true;
-
-    mStreamDoneStatus = status;
-    mQueue.postEvent(mStreamDoneEvent);
-}
-
-void AwesomePlayer::postBufferingEvent_l() {
-    if (mBufferingEventPending) {
-        return;
-    }
-    mBufferingEventPending = true;
-    mQueue.postEventWithDelay(mBufferingEvent, 1000000ll);
-}
-
-void AwesomePlayer::postVideoLagEvent_l() {
-    if (mVideoLagEventPending) {
-        return;
-    }
-    mVideoLagEventPending = true;
-    mQueue.postEventWithDelay(mVideoLagEvent, 1000000ll);
-}
-
-void AwesomePlayer::postCheckAudioStatusEvent(int64_t delayUs) {
-    Mutex::Autolock autoLock(mAudioLock);
-    if (mAudioStatusEventPending) {
-        return;
-    }
-    mAudioStatusEventPending = true;
-    // Do not honor delay when looping in order to limit audio gap
-    if (mFlags & (LOOPING | AUTO_LOOPING)) {
-        delayUs = 0;
-    }
-    mQueue.postEventWithDelay(mCheckAudioStatusEvent, delayUs);
-}
-
-void AwesomePlayer::postAudioTearDownEvent(int64_t delayUs) {
-    Mutex::Autolock autoLock(mAudioLock);
-    if (mAudioTearDownEventPending) {
-        return;
-    }
-    mAudioTearDownEventPending = true;
-    mQueue.postEventWithDelay(mAudioTearDownEvent, delayUs);
-}
-
-void AwesomePlayer::onCheckAudioStatus() {
-    {
-        Mutex::Autolock autoLock(mAudioLock);
-        if (!mAudioStatusEventPending) {
-            // The event was dispatched, and while we were blocking on the
-            // mutex, it has already been cancelled.
-            return;
-        }
-
-        mAudioStatusEventPending = false;
-    }
-
-    Mutex::Autolock autoLock(mLock);
-
-    if (mWatchForAudioSeekComplete && !mAudioPlayer->isSeeking()) {
-        mWatchForAudioSeekComplete = false;
-
-        if (!mSeekNotificationSent) {
-            notifyListener_l(MEDIA_SEEK_COMPLETE);
-            mSeekNotificationSent = true;
-        }
-
-        if (mVideoSource == NULL) {
-            // For video the mSeeking flag is always reset in finishSeekIfNecessary
-            mSeeking = NO_SEEK;
-        }
-
-        notifyIfMediaStarted_l();
-    }
-
-    status_t finalStatus;
-    if (mWatchForAudioEOS && mAudioPlayer->reachedEOS(&finalStatus)) {
-        mWatchForAudioEOS = false;
-        modifyFlags(AUDIO_AT_EOS, SET);
-        modifyFlags(FIRST_FRAME, SET);
-        postStreamDoneEvent_l(finalStatus);
-    }
-}
-
-status_t AwesomePlayer::prepare() {
-    ATRACE_CALL();
-    Mutex::Autolock autoLock(mLock);
-    return prepare_l();
-}
-
-status_t AwesomePlayer::prepare_l() {
-    if (mFlags & PREPARED) {
-        return OK;
-    }
-
-    if (mFlags & PREPARING) {
-        return UNKNOWN_ERROR;
-    }
-
-    mIsAsyncPrepare = false;
-    status_t err = prepareAsync_l();
-
-    if (err != OK) {
-        return err;
-    }
-
-    while (mFlags & PREPARING) {
-        mPreparedCondition.wait(mLock);
-    }
-
-    return mPrepareResult;
-}
-
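prepare() is the synchronous face of the asynchronous path: it posts the same
prepare event and then blocks on mPreparedCondition until PREPARING clears. A
minimal sketch of that wrap-async-in-sync pattern in standard C++ (types and
names hypothetical, not the framework's):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    struct AsyncPreparer {
        std::mutex lock;
        std::condition_variable prepared;
        bool preparing = false;
        int prepareResult = -1;

        int prepareSync() {
            std::unique_lock<std::mutex> l(lock);
            preparing = true;
            // Stand-in for posting onPrepareAsyncEvent() to the event queue.
            std::thread([this] {
                std::lock_guard<std::mutex> g(lock);
                prepareResult = 0;  // "OK"
                preparing = false;
                prepared.notify_all();
            }).detach();
            prepared.wait(l, [this] { return !preparing; });
            return prepareResult;
        }
    };

    int main() {
        AsyncPreparer p;
        return p.prepareSync();
    }
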
-status_t AwesomePlayer::prepareAsync() {
-    ATRACE_CALL();
-    Mutex::Autolock autoLock(mLock);
-
-    if (mFlags & PREPARING) {
-        return UNKNOWN_ERROR;  // async prepare already pending
-    }
-
-    mIsAsyncPrepare = true;
-    return prepareAsync_l();
-}
-
-status_t AwesomePlayer::prepareAsync_l() {
-    if (mFlags & PREPARING) {
-        return UNKNOWN_ERROR;  // async prepare already pending
-    }
-
-    if (!mQueueStarted) {
-        mQueue.start();
-        mQueueStarted = true;
-    }
-
-    modifyFlags(PREPARING, SET);
-    mAsyncPrepareEvent = new AwesomeEvent(
-            this, &AwesomePlayer::onPrepareAsyncEvent);
-
-    mQueue.postEvent(mAsyncPrepareEvent);
-
-    return OK;
-}
-
-status_t AwesomePlayer::finishSetDataSource_l() {
-    ATRACE_CALL();
-    sp<DataSource> dataSource;
-
-    bool isWidevineStreaming = false;
-    if (!strncasecmp("widevine://", mUri.string(), 11)) {
-        isWidevineStreaming = true;
-
-        String8 newURI = String8("http://");
-        newURI.append(mUri.string() + 11);
-
-        mUri = newURI;
-    }
-
-    AString sniffedMIME;
-
-    if (!strncasecmp("http://", mUri.string(), 7)
-            || !strncasecmp("https://", mUri.string(), 8)
-            || isWidevineStreaming) {
-        if (mHTTPService == NULL) {
-            ALOGE("Attempt to play media from http URI without HTTP service.");
-            return UNKNOWN_ERROR;
-        }
-
-        sp<IMediaHTTPConnection> conn = mHTTPService->makeHTTPConnection();
-        mConnectingDataSource = new MediaHTTP(conn);
-
-        String8 cacheConfig;
-        bool disconnectAtHighwatermark;
-        NuCachedSource2::RemoveCacheSpecificHeaders(
-                &mUriHeaders, &cacheConfig, &disconnectAtHighwatermark);
-
-        mLock.unlock();
-        status_t err = mConnectingDataSource->connect(mUri, &mUriHeaders);
-        // force connection at this point, to avoid a race condition between getMIMEType and the
-        // caching datasource constructed below, which could result in multiple requests to the
-        // server, and/or failed connections.
-        String8 contentType = mConnectingDataSource->getMIMEType();
-        mLock.lock();
-
-        if (err != OK) {
-            mConnectingDataSource.clear();
-
-            ALOGI("mConnectingDataSource->connect() returned %d", err);
-            return err;
-        }
-
-        if (!isWidevineStreaming) {
-            // The widevine extractor does its own caching.
-
-#if 0
-            mCachedSource = NuCachedSource2::Create(
-                    new ThrottledSource(
-                        mConnectingDataSource, 50 * 1024 /* bytes/sec */));
-#else
-            mCachedSource = NuCachedSource2::Create(
-                    mConnectingDataSource,
-                    cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
-                    disconnectAtHighwatermark);
-#endif
-
-            dataSource = mCachedSource;
-        } else {
-            dataSource = mConnectingDataSource;
-        }
-
-        mConnectingDataSource.clear();
-
-        if (strncasecmp(contentType.string(), "audio/", 6)) {
-            // We don't do this for streams that appear to be audio-only,
-            // to ensure that even low-bandwidth streams start playing back
-            // fairly instantly.
-
-            // We're going to prefill the cache before trying to instantiate
-            // the extractor below, as the latter is an operation that otherwise
-            // could block on the datasource for a significant amount of time.
-            // During that time we'd be unable to abort the preparation phase
-            // without this prefill.
-            if (mCachedSource != NULL) {
-
-                mLock.unlock();
-
-                // Initially make sure we have at least 192 KB for the sniff
-                // to complete without blocking.
-                static const size_t kMinBytesForSniffing = 192 * 1024;
-
-                off64_t metaDataSize = -1ll;
-                for (;;) {
-                    status_t finalStatus;
-                    size_t cachedDataRemaining =
-                        mCachedSource->approxDataRemaining(&finalStatus);
-
-                    if (finalStatus != OK
-                            || (metaDataSize >= 0
-                                && (off64_t)cachedDataRemaining >= metaDataSize)
-                            || (mFlags & PREPARE_CANCELLED)) {
-                        break;
-                    }
-
-                    ALOGV("now cached %zu bytes of data", cachedDataRemaining);
-
-                    if (metaDataSize < 0
-                            && cachedDataRemaining >= kMinBytesForSniffing) {
-                        String8 tmp;
-                        float confidence;
-                        sp<AMessage> meta;
-                        if (!dataSource->sniff(&tmp, &confidence, &meta)) {
-                            mLock.lock();
-                            return UNKNOWN_ERROR;
-                        }
-
-                        // We successfully identified the file's container
-                        // type; remember this MIME type so we don't have to
-                        // sniff it again when we call MediaExtractor::Create()
-                        // below.
-                        sniffedMIME = tmp.string();
-
-                        if (meta == NULL
-                                || !meta->findInt64("meta-data-size",
-                                     reinterpret_cast<int64_t*>(&metaDataSize))) {
-                            metaDataSize = kHighWaterMarkBytes;
-                        }
-
-                        CHECK_GE(metaDataSize, 0ll);
-                        ALOGV("metaDataSize = %lld bytes", (long long)metaDataSize);
-                    }
-
-                    usleep(200000);
-                }
-
-                mLock.lock();
-            }
-
-            if (mFlags & PREPARE_CANCELLED) {
-                ALOGI("Prepare cancelled while waiting for initial cache fill.");
-                return UNKNOWN_ERROR;
-            }
-        }
-    } else {
-        dataSource = DataSource::CreateFromURI(
-                mHTTPService, mUri.string(), &mUriHeaders);
-    }
-
-    if (dataSource == NULL) {
-        return UNKNOWN_ERROR;
-    }
-
-    sp<MediaExtractor> extractor;
-
-    if (isWidevineStreaming) {
-        String8 mimeType;
-        float confidence;
-        sp<AMessage> dummy;
-        bool success;
-
-        // SniffWVM is potentially blocking since it may require network access.
-        // Do not call it with mLock held.
-        mLock.unlock();
-        success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
-        mLock.lock();
-
-        if (!success
-                || strcasecmp(
-                    mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
-            return ERROR_UNSUPPORTED;
-        }
-
-        mWVMExtractor = new WVMExtractor(dataSource);
-        mWVMExtractor->setAdaptiveStreamingMode(true);
-        if (mUIDValid)
-            mWVMExtractor->setUID(mUID);
-        extractor = mWVMExtractor;
-    } else {
-        extractor = MediaExtractor::Create(
-                dataSource, sniffedMIME.empty() ? NULL : sniffedMIME.c_str());
-
-        if (extractor == NULL) {
-            return UNKNOWN_ERROR;
-        }
-    }
-
-    if (extractor->getDrmFlag()) {
-        checkDrmStatus(dataSource);
-    }
-
-    status_t err = setDataSource_l(extractor);
-
-    if (err != OK) {
-        mWVMExtractor.clear();
-
-        return err;
-    }
-
-    return OK;
-}
-
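The prefill loop above polls the cache every 200 ms and exits on one of three
conditions. A condensed, hypothetical form of its exit test (illustrative
only; names are not the framework's):

    #include <cassert>
    #include <cstdint>

    // Stop polling when the source errors out, when enough bytes for the
    // sniffed metadata are cached, or when the prepare was cancelled.
    static bool prefillDone(bool sourceOk, int64_t metaDataSize,
                            int64_t cachedBytes, bool prepareCancelled) {
        return !sourceOk
                || (metaDataSize >= 0 && cachedBytes >= metaDataSize)
                || prepareCancelled;
    }

    int main() {
        assert(!prefillDone(true, -1, 0, false));      // keep polling
        assert(prefillDone(true, 1024, 2048, false));  // enough data cached
        assert(prefillDone(true, -1, 0, true));        // cancelled
        return 0;
    }
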
-void AwesomePlayer::abortPrepare(status_t err) {
-    CHECK(err != OK);
-
-    if (mIsAsyncPrepare) {
-        notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
-    }
-
-    mPrepareResult = err;
-    modifyFlags((PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED), CLEAR);
-    mAsyncPrepareEvent = NULL;
-    mPreparedCondition.broadcast();
-    mAudioTearDown = false;
-}
-
-// static
-bool AwesomePlayer::ContinuePreparation(void *cookie) {
-    AwesomePlayer *me = static_cast<AwesomePlayer *>(cookie);
-
-    return (me->mFlags & PREPARE_CANCELLED) == 0;
-}
-
-void AwesomePlayer::onPrepareAsyncEvent() {
-    Mutex::Autolock autoLock(mLock);
-    beginPrepareAsync_l();
-}
-
-void AwesomePlayer::beginPrepareAsync_l() {
-    if (mFlags & PREPARE_CANCELLED) {
-        ALOGI("prepare was cancelled before doing anything");
-        abortPrepare(UNKNOWN_ERROR);
-        return;
-    }
-
-    if (mUri.size() > 0) {
-        status_t err = finishSetDataSource_l();
-
-        if (err != OK) {
-            abortPrepare(err);
-            return;
-        }
-    }
-
-    if (mVideoTrack != NULL && mVideoSource == NULL) {
-        status_t err = initVideoDecoder();
-
-        if (err != OK) {
-            abortPrepare(err);
-            return;
-        }
-    }
-
-    if (mAudioTrack != NULL && mAudioSource == NULL) {
-        status_t err = initAudioDecoder();
-
-        if (err != OK) {
-            abortPrepare(err);
-            return;
-        }
-    }
-
-    modifyFlags(PREPARING_CONNECTED, SET);
-
-    if (isStreamingHTTP()) {
-        postBufferingEvent_l();
-    } else {
-        finishAsyncPrepare_l();
-    }
-}
-
-void AwesomePlayer::finishAsyncPrepare_l() {
-    if (mIsAsyncPrepare) {
-        if (mVideoSource == NULL) {
-            notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0);
-        } else {
-            notifyVideoSize_l();
-        }
-
-        notifyListener_l(MEDIA_PREPARED);
-    }
-
-    mPrepareResult = OK;
-    modifyFlags((PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED), CLEAR);
-    modifyFlags(PREPARED, SET);
-    mAsyncPrepareEvent = NULL;
-    mPreparedCondition.broadcast();
-
-    if (mAudioTearDown) {
-        if (mPrepareResult == OK) {
-            if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
-                seekTo_l(mAudioTearDownPosition);
-            }
-
-            if (mAudioTearDownWasPlaying) {
-                modifyFlags(CACHE_UNDERRUN, CLEAR);
-                play_l();
-            }
-        }
-        mAudioTearDown = false;
-    }
-}
-
-uint32_t AwesomePlayer::flags() const {
-    return mExtractorFlags;
-}
-
-void AwesomePlayer::postAudioEOS(int64_t delayUs) {
-    postCheckAudioStatusEvent(delayUs);
-}
-
-void AwesomePlayer::postAudioSeekComplete() {
-    postCheckAudioStatusEvent(0);
-}
-
-void AwesomePlayer::postAudioTearDown() {
-    postAudioTearDownEvent(0);
-}
-
-status_t AwesomePlayer::setParameter(int key, const Parcel &request) {
-    switch (key) {
-        case KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS:
-        {
-            return setCacheStatCollectFreq(request);
-        }
-        default:
-        {
-            return ERROR_UNSUPPORTED;
-        }
-    }
-}
-
-status_t AwesomePlayer::setCacheStatCollectFreq(const Parcel &request) {
-    if (mCachedSource != NULL) {
-        int32_t freqMs = request.readInt32();
-        ALOGD("Request to keep cache stats in the past %d ms",
-            freqMs);
-        return mCachedSource->setCacheStatCollectFreq(freqMs);
-    }
-    return ERROR_UNSUPPORTED;
-}
-
-status_t AwesomePlayer::getParameter(int key, Parcel *reply) {
-    switch (key) {
-    case KEY_PARAMETER_AUDIO_CHANNEL_COUNT:
-        {
-            int32_t channelCount;
-            if (mAudioTrack == 0 ||
-                    !mAudioTrack->getFormat()->findInt32(kKeyChannelCount, &channelCount)) {
-                channelCount = 0;
-            }
-            reply->writeInt32(channelCount);
-        }
-        return OK;
-    default:
-        {
-            return ERROR_UNSUPPORTED;
-        }
-    }
-}
-
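For the KEY_PARAMETER_AUDIO_CHANNEL_COUNT branch, the reply carries a single
int32 (0 when the count is unknown). A hypothetical caller-side sketch,
assuming the binder Parcel headers are available:

    #include <binder/Parcel.h>

    // Illustrative only: reads back the reply written by getParameter().
    static int32_t readChannelCount(android::Parcel &reply) {
        reply.setDataPosition(0);  // rewind before reading locally
        return reply.readInt32();  // 0 means "unknown"
    }
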
-status_t AwesomePlayer::setPlaybackSettings(const AudioPlaybackRate &rate) {
-    Mutex::Autolock autoLock(mLock);
-    // cursory sanity check for non-audio and paused cases
-    if ((rate.mSpeed != 0.f && rate.mSpeed < AUDIO_TIMESTRETCH_SPEED_MIN)
-        || rate.mSpeed > AUDIO_TIMESTRETCH_SPEED_MAX
-        || rate.mPitch < AUDIO_TIMESTRETCH_SPEED_MIN
-        || rate.mPitch > AUDIO_TIMESTRETCH_SPEED_MAX) {
-        return BAD_VALUE;
-    }
-
-    status_t err = OK;
-    if (rate.mSpeed == 0.f) {
-        if (mFlags & PLAYING) {
-            modifyFlags(CACHE_UNDERRUN, CLEAR); // same as pause
-            err = pause_l();
-        }
-        if (err == OK) {
-            // save settings (using old speed) in case player is resumed
-            AudioPlaybackRate newRate = rate;
-            newRate.mSpeed = mPlaybackSettings.mSpeed;
-            mPlaybackSettings = newRate;
-        }
-        return err;
-    }
-    if (mAudioPlayer != NULL) {
-        err = mAudioPlayer->setPlaybackRate(rate);
-    }
-    if (err == OK) {
-        mPlaybackSettings = rate;
-        if (!(mFlags & PLAYING)) {
-            play_l();
-        }
-    }
-    return err;
-}
-
-status_t AwesomePlayer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
-    if (mAudioPlayer != NULL) {
-        status_t err = mAudioPlayer->getPlaybackRate(rate);
-        if (err == OK) {
-            mPlaybackSettings = *rate;
-            Mutex::Autolock autoLock(mLock);
-            if (!(mFlags & PLAYING)) {
-                rate->mSpeed = 0.f;
-            }
-        }
-        return err;
-    }
-    *rate = mPlaybackSettings;
-    return OK;
-}
-
-status_t AwesomePlayer::getTrackInfo(Parcel *reply) const {
-    Mutex::Autolock autoLock(mLock);
-    size_t trackCount = mExtractor->countTracks();
-    if (mTextDriver != NULL) {
-        trackCount += mTextDriver->countExternalTracks();
-    }
-
-    reply->writeInt32(trackCount);
-    for (size_t i = 0; i < mExtractor->countTracks(); ++i) {
-        sp<MetaData> meta = mExtractor->getTrackMetaData(i);
-
-        const char *_mime;
-        CHECK(meta->findCString(kKeyMIMEType, &_mime));
-
-        String8 mime = String8(_mime);
-
-        reply->writeInt32(2); // 2 fields
-
-        if (!strncasecmp(mime.string(), "video/", 6)) {
-            reply->writeInt32(MEDIA_TRACK_TYPE_VIDEO);
-        } else if (!strncasecmp(mime.string(), "audio/", 6)) {
-            reply->writeInt32(MEDIA_TRACK_TYPE_AUDIO);
-        } else if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_TEXT_3GPP)) {
-            reply->writeInt32(MEDIA_TRACK_TYPE_TIMEDTEXT);
-        } else {
-            reply->writeInt32(MEDIA_TRACK_TYPE_UNKNOWN);
-        }
-
-        const char *lang;
-        if (!meta->findCString(kKeyMediaLanguage, &lang)) {
-            lang = "und";
-        }
-        reply->writeString16(String16(lang));
-    }
-
-    if (mTextDriver != NULL) {
-        mTextDriver->getExternalTrackInfo(reply);
-    }
-    return OK;
-}
-
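The reply written above has a fixed layout: an int32 track count, then per
extractor track an int32 field count (always 2 here), an int32 track type,
and a UTF-16 language code; external text tracks appended by the
TimedTextDriver presumably follow the same convention. A hypothetical reader
(sketch only, not framework code):

    #include <binder/Parcel.h>
    #include <utils/String16.h>

    // Illustrative walk over the reply written by getTrackInfo().
    static void readTrackInfo(const android::Parcel &reply) {
        const int32_t trackCount = reply.readInt32();
        for (int32_t i = 0; i < trackCount; ++i) {
            const int32_t numFields = reply.readInt32();  // 2 per track
            const int32_t trackType = reply.readInt32();  // MEDIA_TRACK_TYPE_*
            const android::String16 lang = reply.readString16();  // "und" if unset
            (void)numFields; (void)trackType; (void)lang;
        }
    }
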
-status_t AwesomePlayer::selectAudioTrack_l(
-        const sp<MediaSource>& source, size_t trackIndex) {
-
-    ALOGI("selectAudioTrack_l: trackIndex=%zu, mFlags=0x%x", trackIndex, mFlags);
-
-    {
-        Mutex::Autolock autoLock(mStatsLock);
-        if ((ssize_t)trackIndex == mActiveAudioTrackIndex) {
-            ALOGI("Track %zu is active. Does nothing.", trackIndex);
-            return OK;
-        }
-        //mStats.mFlags = mFlags;
-    }
-
-    if (mSeeking != NO_SEEK) {
-        ALOGE("Selecting a track while seeking is not supported");
-        return ERROR_UNSUPPORTED;
-    }
-
-    if ((mFlags & PREPARED) == 0) {
-        ALOGE("Data source has not finished preparation");
-        return ERROR_UNSUPPORTED;
-    }
-
-    CHECK(source != NULL);
-    bool wasPlaying = (mFlags & PLAYING) != 0;
-
-    pause_l();
-
-    int64_t curTimeUs;
-    CHECK_EQ(getPosition(&curTimeUs), (status_t)OK);
-
-    if ((mAudioPlayer == NULL || !(mFlags & AUDIOPLAYER_STARTED))
-            && mAudioSource != NULL) {
-        // If we had an audio player, it would have effectively
-        // taken possession of the audio source and stopped it when
-        // _it_ is stopped. Otherwise this is still our responsibility.
-        mAudioSource->stop();
-    }
-    mAudioSource.clear();
-    mOmxSource.clear();
-
-    mTimeSource = NULL;
-
-    delete mAudioPlayer;
-    mAudioPlayer = NULL;
-
-    modifyFlags(AUDIOPLAYER_STARTED, CLEAR);
-
-    setAudioSource(source);
-
-    modifyFlags(AUDIO_AT_EOS, CLEAR);
-    modifyFlags(AT_EOS, CLEAR);
-
-    status_t err;
-    if ((err = initAudioDecoder()) != OK) {
-        ALOGE("Failed to init audio decoder: 0x%x", err);
-        return err;
-    }
-
-    mSeekNotificationSent = true;
-    seekTo_l(curTimeUs);
-
-    if (wasPlaying) {
-        play_l();
-    }
-
-    mActiveAudioTrackIndex = trackIndex;
-
-    return OK;
-}
-
-status_t AwesomePlayer::selectTrack(size_t trackIndex, bool select) {
-    ATRACE_CALL();
-    ALOGV("selectTrack: trackIndex = %zu and select=%d", trackIndex, select);
-    Mutex::Autolock autoLock(mLock);
-    size_t trackCount = mExtractor->countTracks();
-    if (mTextDriver != NULL) {
-        trackCount += mTextDriver->countExternalTracks();
-    }
-    if (trackIndex >= trackCount) {
-        ALOGE("Track index (%zu) is out of range [0, %zu)", trackIndex, trackCount);
-        return ERROR_OUT_OF_RANGE;
-    }
-
-    bool isAudioTrack = false;
-    if (trackIndex < mExtractor->countTracks()) {
-        sp<MetaData> meta = mExtractor->getTrackMetaData(trackIndex);
-        const char *mime;
-        CHECK(meta->findCString(kKeyMIMEType, &mime));
-        isAudioTrack = !strncasecmp(mime, "audio/", 6);
-
-        if (!isAudioTrack && strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) != 0) {
-            ALOGE("Track %zu is not either audio or timed text", trackIndex);
-            return ERROR_UNSUPPORTED;
-        }
-    }
-
-    if (isAudioTrack) {
-        if (!select) {
-            ALOGE("Deselect an audio track (%zu) is not supported", trackIndex);
-            return ERROR_UNSUPPORTED;
-        }
-        return selectAudioTrack_l(mExtractor->getTrack(trackIndex), trackIndex);
-    }
-
-    // Timed text track handling
-    if (mTextDriver == NULL) {
-        return INVALID_OPERATION;
-    }
-
-    status_t err = OK;
-    if (select) {
-        err = mTextDriver->selectTrack(trackIndex);
-        if (err == OK) {
-            modifyFlags(TEXTPLAYER_INITIALIZED, SET);
-            if (mFlags & PLAYING && !(mFlags & TEXT_RUNNING)) {
-                mTextDriver->start();
-                modifyFlags(TEXT_RUNNING, SET);
-            }
-        }
-    } else {
-        err = mTextDriver->unselectTrack(trackIndex);
-        if (err == OK) {
-            modifyFlags(TEXTPLAYER_INITIALIZED, CLEAR);
-            modifyFlags(TEXT_RUNNING, CLEAR);
-        }
-    }
-    return err;
-}
-
-size_t AwesomePlayer::countTracks() const {
-    return mExtractor->countTracks() + mTextDriver->countExternalTracks();
-}
-
-status_t AwesomePlayer::setVideoScalingMode(int32_t mode) {
-    Mutex::Autolock lock(mLock);
-    return setVideoScalingMode_l(mode);
-}
-
-status_t AwesomePlayer::setVideoScalingMode_l(int32_t mode) {
-    mVideoScalingMode = mode;
-    if (mNativeWindow != NULL) {
-        status_t err = native_window_set_scaling_mode(
-                mNativeWindow.get(), mVideoScalingMode);
-        if (err != OK) {
-            ALOGW("Failed to set scaling mode: %d", err);
-        }
-        return err;
-    }
-    return OK;
-}
-
-status_t AwesomePlayer::invoke(const Parcel &request, Parcel *reply) {
-    ATRACE_CALL();
-    if (NULL == reply) {
-        return android::BAD_VALUE;
-    }
-    int32_t methodId;
-    status_t ret = request.readInt32(&methodId);
-    if (ret != android::OK) {
-        return ret;
-    }
-    switch(methodId) {
-        case INVOKE_ID_SET_VIDEO_SCALING_MODE:
-        {
-            int mode = request.readInt32();
-            return setVideoScalingMode(mode);
-        }
-
-        case INVOKE_ID_GET_TRACK_INFO:
-        {
-            return getTrackInfo(reply);
-        }
-        case INVOKE_ID_ADD_EXTERNAL_SOURCE:
-        {
-            Mutex::Autolock autoLock(mLock);
-            if (mTextDriver == NULL) {
-                mTextDriver = new TimedTextDriver(mListener, mHTTPService);
-            }
-            // String values written in Parcel are UTF-16 values.
-            String8 uri(request.readString16());
-            String8 mimeType(request.readString16());
-            size_t nTracks = countTracks();
-            return mTextDriver->addOutOfBandTextSource(nTracks, uri, mimeType);
-        }
-        case INVOKE_ID_ADD_EXTERNAL_SOURCE_FD:
-        {
-            Mutex::Autolock autoLock(mLock);
-            if (mTextDriver == NULL) {
-                mTextDriver = new TimedTextDriver(mListener, mHTTPService);
-            }
-            int fd         = request.readFileDescriptor();
-            off64_t offset = request.readInt64();
-            off64_t length  = request.readInt64();
-            String8 mimeType(request.readString16());
-            size_t nTracks = countTracks();
-            return mTextDriver->addOutOfBandTextSource(
-                    nTracks, fd, offset, length, mimeType);
-        }
-        case INVOKE_ID_SELECT_TRACK:
-        {
-            int trackIndex = request.readInt32();
-            return selectTrack(trackIndex, true /* select */);
-        }
-        case INVOKE_ID_UNSELECT_TRACK:
-        {
-            int trackIndex = request.readInt32();
-            return selectTrack(trackIndex, false /* select */);
-        }
-        default:
-        {
-            return ERROR_UNSUPPORTED;
-        }
-    }
-    // Not reached.
-    return OK;
-}
-
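invoke() expects the request parcel to begin with the method id, followed by
that method's arguments. A hypothetical caller-side sketch for
INVOKE_ID_SET_VIDEO_SCALING_MODE (assuming the INVOKE_ID_* constants come
from media/mediaplayer.h):

    #include <binder/Parcel.h>
    #include <media/mediaplayer.h>  // INVOKE_ID_* (assumed location)

    // Illustrative only: builds the request parcel the switch above
    // dispatches on.
    static void buildScalingModeRequest(android::Parcel *request, int32_t mode) {
        request->writeInt32(INVOKE_ID_SET_VIDEO_SCALING_MODE);  // method id
        request->writeInt32(mode);                              // its argument
    }
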
-bool AwesomePlayer::isStreamingHTTP() const {
-    return mCachedSource != NULL || mWVMExtractor != NULL;
-}
-
-status_t AwesomePlayer::dump(
-        int fd, const Vector<String16> & /* args */) const {
-    Mutex::Autolock autoLock(mStatsLock);
-
-    FILE *out = fdopen(dup(fd), "w");
-
-    fprintf(out, " AwesomePlayer\n");
-    if (mStats.mFd < 0) {
-        fprintf(out, "  URI(%s)", uriDebugString(mUri, mFlags & INCOGNITO).c_str());
-    } else {
-        fprintf(out, "  fd(%d)", mStats.mFd);
-    }
-
-    fprintf(out, ", flags(0x%08x)", mStats.mFlags);
-
-    if (mStats.mBitrate >= 0) {
-        fprintf(out, ", bitrate(%" PRId64 " bps)", mStats.mBitrate);
-    }
-
-    fprintf(out, "\n");
-
-    for (size_t i = 0; i < mStats.mTracks.size(); ++i) {
-        const TrackStat &stat = mStats.mTracks.itemAt(i);
-
-        fprintf(out, "  Track %zu\n", i + 1);
-        fprintf(out, "   MIME(%s)", stat.mMIME.string());
-
-        if (!stat.mDecoderName.isEmpty()) {
-            fprintf(out, ", decoder(%s)", stat.mDecoderName.string());
-        }
-
-        fprintf(out, "\n");
-
-        if ((ssize_t)i == mStats.mVideoTrackIndex) {
-            fprintf(out,
-                    "   videoDimensions(%d x %d), "
-                    "numVideoFramesDecoded(%" PRId64 "), "
-                    "numVideoFramesDropped(%" PRId64 ")\n",
-                    mStats.mVideoWidth,
-                    mStats.mVideoHeight,
-                    mStats.mNumVideoFramesDecoded,
-                    mStats.mNumVideoFramesDropped);
-        }
-    }
-
-    fclose(out);
-    out = NULL;
-
-    return OK;
-}
-
-void AwesomePlayer::modifyFlags(unsigned value, FlagMode mode) {
-    switch (mode) {
-        case SET:
-            mFlags |= value;
-            break;
-        case CLEAR:
-            if ((value & CACHE_UNDERRUN) && (mFlags & CACHE_UNDERRUN)) {
-                notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_END);
-            }
-            mFlags &= ~value;
-            break;
-        case ASSIGN:
-            mFlags = value;
-            break;
-        default:
-            TRESPASS();
-    }
-
-    {
-        Mutex::Autolock autoLock(mStatsLock);
-        mStats.mFlags = mFlags;
-    }
-}
-
-void AwesomePlayer::onAudioTearDownEvent() {
-    Mutex::Autolock autoLock(mLock);
-    if (!mAudioTearDownEventPending) {
-        return;
-    }
-    mAudioTearDownEventPending = false;
-
-    ALOGV("onAudioTearDownEvent");
-
-    // stream info is cleared by reset_l() so copy what we need
-    mAudioTearDownWasPlaying = (mFlags & PLAYING);
-    KeyedVector<String8, String8> uriHeaders(mUriHeaders);
-    sp<DataSource> fileSource(mFileSource);
-
-    mStatsLock.lock();
-    String8 uri(mStats.mURI);
-    mStatsLock.unlock();
-
-    // Get the current position so we can restart the recreated stream from here.
-    getPosition(&mAudioTearDownPosition);
-
-    sp<IMediaHTTPService> savedHTTPService = mHTTPService;
-
-    bool wasLooping = mFlags & LOOPING;
-    // Reset and recreate
-    reset_l();
-
-    status_t err;
-
-    if (fileSource != NULL) {
-        mFileSource = fileSource;
-        err = setDataSource_l(fileSource);
-    } else {
-        err = setDataSource_l(savedHTTPService, uri, &uriHeaders);
-    }
-
-    mFlags |= PREPARING;
-    if (err != OK) {
-        // This will force beginPrepareAsync_l() to notify
-        // a MEDIA_ERROR to the client and abort the prepare.
-        mFlags |= PREPARE_CANCELLED;
-    }
-    if (wasLooping) {
-        mFlags |= LOOPING;
-    }
-
-    mAudioTearDown = true;
-    mIsAsyncPrepare = true;
-
-    // Call prepare for the host decoding
-    beginPrepareAsync_l();
-}
-
-}  // namespace android
diff --git a/media/libstagefright/CallbackDataSource.cpp b/media/libstagefright/CallbackDataSource.cpp
index e17fdf8..0df7da4 100644
--- a/media/libstagefright/CallbackDataSource.cpp
+++ b/media/libstagefright/CallbackDataSource.cpp
@@ -30,14 +30,17 @@
 
 CallbackDataSource::CallbackDataSource(
     const sp<IDataSource>& binderDataSource)
-    : mIDataSource(binderDataSource) {
+    : mIDataSource(binderDataSource),
+      mIsClosed(false) {
     // Set up the buffer to read into.
     mMemory = mIDataSource->getIMemory();
+    mName = String8::format("CallbackDataSource(%s)", mIDataSource->toString().string());
 }
 
 CallbackDataSource::~CallbackDataSource() {
     ALOGV("~CallbackDataSource");
-    mIDataSource->close();
+    close();
 }
 
 status_t CallbackDataSource::initCheck() const {
@@ -64,7 +67,7 @@
             mIDataSource->readAt(offset + totalNumRead, numToRead);
         // A negative return value represents an error. Pass it on.
         if (numRead < 0) {
-            return numRead;
+            return numRead == ERROR_END_OF_STREAM && totalNumRead > 0 ? totalNumRead : numRead;
         }
         // A zero return value signals EOS. Return the bytes read so far.
         if (numRead == 0) {
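The changed return above makes readAt() report a short read instead of an
error when ERROR_END_OF_STREAM arrives after some bytes were already copied.
A standalone sketch of that fold (the error value is a stand-in; the real
constant lives in MediaErrors.h):

    #include <cassert>
    #include <sys/types.h>

    static const ssize_t kErrorEndOfStream = -1011;  // stand-in value

    // EOS after a partial read becomes a short count; every other error
    // (and EOS with nothing read) passes through unchanged.
    static ssize_t foldEosIntoShortRead(ssize_t numRead, size_t totalNumRead) {
        if (numRead == kErrorEndOfStream && totalNumRead > 0) {
            return (ssize_t)totalNumRead;
        }
        return numRead;
    }

    int main() {
        assert(foldEosIntoShortRead(kErrorEndOfStream, 512) == 512);
        assert(foldEosIntoShortRead(kErrorEndOfStream, 0) == kErrorEndOfStream);
        assert(foldEosIntoShortRead(-5, 512) == -5);
        return 0;
    }
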
@@ -95,8 +98,24 @@
     return OK;
 }
 
+uint32_t CallbackDataSource::flags() {
+    return mIDataSource->getFlags();
+}
+
+void CallbackDataSource::close() {
+    if (!mIsClosed) {
+        mIDataSource->close();
+        mIsClosed = true;
+    }
+}
+
+sp<DecryptHandle> CallbackDataSource::DrmInitialization(const char *mime) {
+    return mIDataSource->DrmInitialization(mime);
+}
+
 TinyCacheSource::TinyCacheSource(const sp<DataSource>& source)
     : mSource(source), mCachedOffset(0), mCachedSize(0) {
+    mName = String8::format("TinyCacheSource(%s)", mSource->toString().string());
 }
 
 status_t TinyCacheSource::initCheck() const {
@@ -131,12 +150,19 @@
         }
     }
 
     // Fill the cache and copy to the caller.
     const ssize_t numRead = mSource->readAt(offset, mCache, kCacheSize);
     if (numRead <= 0) {
+        // Flush cache on error
+        mCachedSize = 0;
+        mCachedOffset = 0;
         return numRead;
     }
     if ((size_t)numRead > kCacheSize) {
+        // Flush cache on error
+        mCachedSize = 0;
+        mCachedOffset = 0;
         return ERROR_OUT_OF_RANGE;
     }
 
@@ -157,4 +183,11 @@
     return mSource->flags();
 }
 
+sp<DecryptHandle> TinyCacheSource::DrmInitialization(const char *mime) {
+    // Flush the cache when DrmInitialization occurs, since decrypted
+    // data may differ from what is in the cache.
+    mCachedOffset = 0;
+    mCachedSize = 0;
+    return mSource->DrmInitialization(mime);
+}
 } // namespace android
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index fa30644..e087249 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -22,15 +22,16 @@
 
 #include <OMX_Component.h>
 #include <binder/IPCThreadState.h>
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <media/hardware/HardwareAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/CameraSource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
-#include <media/hardware/HardwareAPI.h>
 #include <camera/Camera.h>
 #include <camera/CameraParameters.h>
-#include <camera/ICameraRecordingProxy.h>
 #include <gui/Surface.h>
 #include <utils/String8.h>
 #include <cutils/properties.h>
@@ -55,6 +56,8 @@
     virtual void postDataTimestamp(
             nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
 
+    virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle);
+
 protected:
     virtual ~CameraSourceListener();
 
@@ -99,6 +102,14 @@
     }
 }
 
+void CameraSourceListener::postRecordingFrameHandleTimestamp(nsecs_t timestamp,
+        native_handle_t* handle) {
+    sp<CameraSource> source = mSource.promote();
+    if (source.get() != nullptr) {
+        source->recordingFrameHandleCallbackTimestamp(timestamp/1000, handle);
+    }
+}
+
 static int32_t getColorFormat(const char* colorFormat) {
     if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420P)) {
        return OMX_COLOR_FormatYUV420Planar;
@@ -140,35 +151,37 @@
     size.width = -1;
     size.height = -1;
 
-    sp<ICamera> camera;
-    return new CameraSource(camera, NULL, 0, clientName, -1,
-            size, -1, NULL, false);
+    sp<hardware::ICamera> camera;
+    return new CameraSource(camera, NULL, 0, clientName, Camera::USE_CALLING_UID,
+            Camera::USE_CALLING_PID, size, -1, NULL, false);
 }
 
 // static
 CameraSource *CameraSource::CreateFromCamera(
-    const sp<ICamera>& camera,
+    const sp<hardware::ICamera>& camera,
     const sp<ICameraRecordingProxy>& proxy,
     int32_t cameraId,
     const String16& clientName,
     uid_t clientUid,
+    pid_t clientPid,
     Size videoSize,
     int32_t frameRate,
     const sp<IGraphicBufferProducer>& surface,
     bool storeMetaDataInVideoBuffers) {
 
     CameraSource *source = new CameraSource(camera, proxy, cameraId,
-            clientName, clientUid, videoSize, frameRate, surface,
+            clientName, clientUid, clientPid, videoSize, frameRate, surface,
             storeMetaDataInVideoBuffers);
     return source;
 }
 
 CameraSource::CameraSource(
-    const sp<ICamera>& camera,
+    const sp<hardware::ICamera>& camera,
     const sp<ICameraRecordingProxy>& proxy,
     int32_t cameraId,
     const String16& clientName,
     uid_t clientUid,
+    pid_t clientPid,
     Size videoSize,
     int32_t frameRate,
     const sp<IGraphicBufferProducer>& surface,
@@ -192,7 +205,7 @@
     mVideoSize.height = -1;
 
     mInitCheck = init(camera, proxy, cameraId,
-                    clientName, clientUid,
+                    clientName, clientUid, clientPid,
                     videoSize, frameRate,
                     storeMetaDataInVideoBuffers);
     if (mInitCheck != OK) releaseCamera();
@@ -203,11 +216,11 @@
 }
 
 status_t CameraSource::isCameraAvailable(
-    const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
-    int32_t cameraId, const String16& clientName, uid_t clientUid) {
+    const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
+    int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid) {
 
     if (camera == 0) {
-        mCamera = Camera::connect(cameraId, clientName, clientUid);
+        mCamera = Camera::connect(cameraId, clientName, clientUid, clientPid);
         if (mCamera == 0) return -EBUSY;
         mCameraFlags &= ~FLAGS_HOT_CAMERA;
     } else {
@@ -486,11 +499,12 @@
  * @return OK if no error.
  */
 status_t CameraSource::init(
-        const sp<ICamera>& camera,
+        const sp<hardware::ICamera>& camera,
         const sp<ICameraRecordingProxy>& proxy,
         int32_t cameraId,
         const String16& clientName,
         uid_t clientUid,
+        pid_t clientPid,
         Size videoSize,
         int32_t frameRate,
         bool storeMetaDataInVideoBuffers) {
@@ -498,19 +512,94 @@
     ALOGV("init");
     status_t err = OK;
     int64_t token = IPCThreadState::self()->clearCallingIdentity();
-    err = initWithCameraAccess(camera, proxy, cameraId, clientName, clientUid,
+    err = initWithCameraAccess(camera, proxy, cameraId, clientName, clientUid, clientPid,
                                videoSize, frameRate,
                                storeMetaDataInVideoBuffers);
     IPCThreadState::self()->restoreCallingIdentity(token);
     return err;
 }
 
+void CameraSource::createVideoBufferMemoryHeap(size_t size, uint32_t bufferCount) {
+    mMemoryHeapBase = new MemoryHeapBase(size * bufferCount, 0,
+            "StageFright-CameraSource-BufferHeap");
+    for (uint32_t i = 0; i < bufferCount; i++) {
+        mMemoryBases.push_back(new MemoryBase(mMemoryHeapBase, i * size, size));
+    }
+}
+
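createVideoBufferMemoryHeap() makes one contiguous allocation of
size * bufferCount bytes and carves it into fixed-size IMemory slots, with
slot i covering bytes [i * size, (i + 1) * size). A minimal standalone
stand-in for that slicing (names hypothetical):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // One backing block handed out as fixed-size slots, mirroring the
    // MemoryHeapBase + per-slot MemoryBase arrangement above.
    struct SlotHeap {
        std::vector<unsigned char> backing;
        size_t slotSize;
        SlotHeap(size_t size, size_t count)
            : backing(size * count), slotSize(size) {}
        void *slot(size_t i) { return backing.data() + i * slotSize; }
    };

    int main() {
        SlotHeap heap(64, 4);  // 4 slots of 64 bytes each
        assert(heap.slot(1) == heap.backing.data() + 64);
        return 0;
    }
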
+status_t CameraSource::initBufferQueue(uint32_t width, uint32_t height,
+        uint32_t format, android_dataspace dataSpace, uint32_t bufferCount) {
+    ALOGV("initBufferQueue");
+
+    if (mVideoBufferConsumer != nullptr || mVideoBufferProducer != nullptr) {
+        ALOGE("%s: Buffer queue already exists", __FUNCTION__);
+        return ALREADY_EXISTS;
+    }
+
+    // Create a buffer queue.
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+
+    uint32_t usage = GRALLOC_USAGE_SW_READ_OFTEN;
+    if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+        usage = GRALLOC_USAGE_HW_VIDEO_ENCODER;
+    }
+
+    bufferCount += kConsumerBufferCount;
+
+    mVideoBufferConsumer = new BufferItemConsumer(consumer, usage, bufferCount);
+    mVideoBufferConsumer->setName(String8::format("StageFright-CameraSource"));
+    mVideoBufferProducer = producer;
+
+    status_t res = mVideoBufferConsumer->setDefaultBufferSize(width, height);
+    if (res != OK) {
+        ALOGE("%s: Could not set buffer dimensions %dx%d: %s (%d)", __FUNCTION__, width, height,
+                strerror(-res), res);
+        return res;
+    }
+
+    res = mVideoBufferConsumer->setDefaultBufferFormat(format);
+    if (res != OK) {
+        ALOGE("%s: Could not set buffer format %d: %s (%d)", __FUNCTION__, format,
+                strerror(-res), res);
+        return res;
+    }
+
+    res = mVideoBufferConsumer->setDefaultBufferDataSpace(dataSpace);
+    if (res != OK) {
+        ALOGE("%s: Could not set data space %d: %s (%d)", __FUNCTION__, dataSpace,
+                strerror(-res), res);
+        return res;
+    }
+
+    res = mCamera->setVideoTarget(mVideoBufferProducer);
+    if (res != OK) {
+        ALOGE("%s: Failed to set video target: %s (%d)", __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    // Create memory heap to store buffers as VideoNativeMetadata.
+    createVideoBufferMemoryHeap(sizeof(VideoNativeMetadata), bufferCount);
+
+    mBufferQueueListener = new BufferQueueListener(mVideoBufferConsumer, this);
+    res = mBufferQueueListener->run("CameraSource-BufferQueueListener");
+    if (res != OK) {
+        ALOGE("%s: Could not run buffer queue listener thread: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
 status_t CameraSource::initWithCameraAccess(
-        const sp<ICamera>& camera,
+        const sp<hardware::ICamera>& camera,
         const sp<ICameraRecordingProxy>& proxy,
         int32_t cameraId,
         const String16& clientName,
         uid_t clientUid,
+        pid_t clientPid,
         Size videoSize,
         int32_t frameRate,
         bool storeMetaDataInVideoBuffers) {
@@ -518,7 +607,7 @@
     status_t err = OK;
 
     if ((err = isCameraAvailable(camera, proxy, cameraId,
-            clientName, clientUid)) != OK) {
+            clientName, clientUid, clientPid)) != OK) {
         ALOGE("Camera connection could not be established.");
         return err;
     }
@@ -553,12 +642,23 @@
         CHECK_EQ((status_t)OK, mCamera->setPreviewTarget(mSurface));
     }
 
-    // By default, do not store metadata in video buffers
-    mIsMetaDataStoredInVideoBuffers = false;
-    mCamera->storeMetaDataInBuffers(false);
+    // By default, store real data in video buffers.
+    mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV;
     if (storeMetaDataInVideoBuffers) {
-        if (OK == mCamera->storeMetaDataInBuffers(true)) {
-            mIsMetaDataStoredInVideoBuffers = true;
+        if (OK == mCamera->setVideoBufferMode(hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE)) {
+            mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE;
+        } else if (OK == mCamera->setVideoBufferMode(
+                hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA)) {
+            mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA;
+        }
+    }
+
+    if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV) {
+        err = mCamera->setVideoBufferMode(hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV);
+        if (err != OK) {
+            ALOGE("%s: Setting video buffer mode to VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV failed: "
+                    "%s (err=%d)", __FUNCTION__, strerror(-err), err);
+            return err;
         }
     }
 
@@ -598,26 +698,42 @@
     // will connect to the camera in ICameraRecordingProxy::startRecording.
     int64_t token = IPCThreadState::self()->clearCallingIdentity();
     status_t err;
-    if (mNumInputBuffers > 0) {
+
+    if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
+        // Initialize buffer queue.
+        err = initBufferQueue(mVideoSize.width, mVideoSize.height, mEncoderFormat,
+                (android_dataspace_t)mEncoderDataSpace,
+                mNumInputBuffers > 0 ? mNumInputBuffers : 1);
+        if (err != OK) {
+            ALOGE("%s: Failed to initialize buffer queue: %s (err=%d)", __FUNCTION__,
+                    strerror(-err), err);
+            return err;
+        }
+    } else {
+        if (mNumInputBuffers > 0) {
+            err = mCamera->sendCommand(
+                CAMERA_CMD_SET_VIDEO_BUFFER_COUNT, mNumInputBuffers, 0);
+
+            // This could happen for CameraHAL1 clients; thus the failure is
+            // not a fatal error
+            if (err != OK) {
+                ALOGW("Failed to set video buffer count to %d due to %d",
+                    mNumInputBuffers, err);
+            }
+        }
+
         err = mCamera->sendCommand(
-            CAMERA_CMD_SET_VIDEO_BUFFER_COUNT, mNumInputBuffers, 0);
+            CAMERA_CMD_SET_VIDEO_FORMAT, mEncoderFormat, mEncoderDataSpace);
 
         // This could happen for CameraHAL1 clients; thus the failure is
         // not a fatal error
         if (err != OK) {
-            ALOGW("Failed to set video buffer count to %d due to %d",
-                mNumInputBuffers, err);
+            ALOGW("Failed to set video encoder format/dataspace to %d, %d due to %d",
+                    mEncoderFormat, mEncoderDataSpace, err);
         }
-    }
 
-    err = mCamera->sendCommand(
-        CAMERA_CMD_SET_VIDEO_FORMAT, mEncoderFormat, mEncoderDataSpace);
-
-    // This could happen for CameraHAL1 clients; thus the failure is
-    // not a fatal error
-    if (err != OK) {
-        ALOGW("Failed to set video encoder format/dataspace to %d, %d due to %d",
-                mEncoderFormat, mEncoderDataSpace, err);
+        // Create memory heap to store buffers as VideoNativeMetadata.
+        createVideoBufferMemoryHeap(sizeof(VideoNativeHandleMetadata), kDefaultVideoBufferCount);
     }
 
     err = OK;
@@ -658,7 +774,7 @@
     mStartTimeUs = 0;
     mNumInputBuffers = 0;
     mEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
-    mEncoderDataSpace = HAL_DATASPACE_BT709;
+    mEncoderDataSpace = HAL_DATASPACE_V0_BT709;
 
     if (meta) {
         int64_t startTimeUs;
@@ -674,10 +790,10 @@
 
         // apply encoder color format if specified
         if (meta->findInt32(kKeyPixelFormat, &mEncoderFormat)) {
-            ALOGV("Using encoder format: %#x", mEncoderFormat);
+            ALOGI("Using encoder format: %#x", mEncoderFormat);
         }
         if (meta->findInt32(kKeyColorSpace, &mEncoderDataSpace)) {
-            ALOGV("Using encoder data space: %#x", mEncoderDataSpace);
+            ALOGI("Using encoder data space: %#x", mEncoderDataSpace);
         }
     }
 
@@ -692,10 +808,14 @@
 void CameraSource::stopCameraRecording() {
     ALOGV("stopCameraRecording");
     if (mCameraFlags & FLAGS_HOT_CAMERA) {
-        mCameraRecordingProxy->stopRecording();
+        if (mCameraRecordingProxy != 0) {
+            mCameraRecordingProxy->stopRecording();
+        }
     } else {
-        mCamera->setListener(NULL);
-        mCamera->stopRecording();
+        if (mCamera != 0) {
+            mCamera->setListener(NULL);
+            mCamera->stopRecording();
+        }
     }
 }
 
@@ -773,6 +893,14 @@
         CHECK_EQ(mNumFramesReceived, mNumFramesEncoded + mNumFramesDropped);
     }
 
+    if (mBufferQueueListener != nullptr) {
+        mBufferQueueListener->requestExit();
+        mBufferQueueListener->join();
+        mBufferQueueListener.clear();
+    }
+
+    mVideoBufferConsumer.clear();
+    mVideoBufferProducer.clear();
     releaseCamera();
 
     ALOGD("reset: X");
@@ -781,12 +909,60 @@
 
 void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) {
     ALOGV("releaseRecordingFrame");
-    if (mCameraRecordingProxy != NULL) {
-        mCameraRecordingProxy->releaseRecordingFrame(frame);
-    } else if (mCamera != NULL) {
-        int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        mCamera->releaseRecordingFrame(frame);
-        IPCThreadState::self()->restoreCallingIdentity(token);
+
+    if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
+        // Return the buffer to buffer queue in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
+        ssize_t offset;
+        size_t size;
+        sp<IMemoryHeap> heap = frame->getMemory(&offset, &size);
+        if (heap->getHeapID() != mMemoryHeapBase->getHeapID()) {
+            ALOGE("%s: Mismatched heap ID, ignoring release (got %x, expected %x)", __FUNCTION__,
+                    heap->getHeapID(), mMemoryHeapBase->getHeapID());
+            return;
+        }
+
+        VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
+                (uint8_t*)heap->getBase() + offset);
+
+        // Find the corresponding buffer item for the native window buffer.
+        ssize_t index = mReceivedBufferItemMap.indexOfKey(payload->pBuffer);
+        if (index == NAME_NOT_FOUND) {
+            ALOGE("%s: Couldn't find buffer item for %p", __FUNCTION__, payload->pBuffer);
+            return;
+        }
+
+        BufferItem buffer = mReceivedBufferItemMap.valueAt(index);
+        mReceivedBufferItemMap.removeItemsAt(index);
+        mVideoBufferConsumer->releaseBuffer(buffer);
+        mMemoryBases.push_back(frame);
+        mMemoryBaseAvailableCond.signal();
+    } else {
+        native_handle_t* handle = nullptr;
+
+        // Check if frame contains a VideoNativeHandleMetadata.
+        if (frame->size() == sizeof(VideoNativeHandleMetadata)) {
+            VideoNativeHandleMetadata *metadata =
+                (VideoNativeHandleMetadata*)(frame->pointer());
+            if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
+                handle = metadata->pHandle;
+            }
+        }
+
+        if (handle != nullptr) {
+            // Frame contains a VideoNativeHandleMetadata. Send the handle back to camera.
+            releaseRecordingFrameHandle(handle);
+            mMemoryBases.push_back(frame);
+            mMemoryBaseAvailableCond.signal();
+        } else if (mCameraRecordingProxy != nullptr) {
+            // mCamera is created by application. Return the frame back to camera via camera
+            // recording proxy.
+            mCameraRecordingProxy->releaseRecordingFrame(frame);
+        } else if (mCamera != nullptr) {
+            // mCamera is created by CameraSource. Return the frame directly back to camera.
+            int64_t token = IPCThreadState::self()->clearCallingIdentity();
+            mCamera->releaseRecordingFrame(frame);
+            IPCThreadState::self()->restoreCallingIdentity(token);
+        }
     }
 }
 
@@ -794,8 +970,6 @@
     List<sp<IMemory> >::iterator it;
     while (!mFramesReceived.empty()) {
         it = mFramesReceived.begin();
-        // b/28466701
-        adjustOutgoingANWBuffer(it->get());
         releaseRecordingFrame(*it);
         mFramesReceived.erase(it);
         ++mNumFramesDropped;
@@ -816,9 +990,6 @@
     for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin();
          it != mFramesBeingEncoded.end(); ++it) {
         if ((*it)->pointer() ==  buffer->data()) {
-            // b/28466701
-            adjustOutgoingANWBuffer(it->get());
-
             releaseOneRecordingFrame((*it));
             mFramesBeingEncoded.erase(it);
             ++mNumFramesEncoded;
@@ -878,29 +1049,23 @@
     return OK;
 }
 
-void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
-        int32_t msgType __unused, const sp<IMemory> &data) {
-    ALOGV("dataCallbackTimestamp: timestamp %lld us", (long long)timestampUs);
-    Mutex::Autolock autoLock(mLock);
+bool CameraSource::shouldSkipFrameLocked(int64_t timestampUs) {
     if (!mStarted || (mNumFramesReceived == 0 && timestampUs < mStartTimeUs)) {
         ALOGV("Drop frame at %lld/%lld us", (long long)timestampUs, (long long)mStartTimeUs);
-        releaseOneRecordingFrame(data);
-        return;
+        return true;
     }
 
     // May need to skip frame or modify timestamp. Currently implemented
     // by the subclass CameraSourceTimeLapse.
     if (skipCurrentFrame(timestampUs)) {
-        releaseOneRecordingFrame(data);
-        return;
+        return true;
     }
 
     if (mNumFramesReceived > 0) {
         if (timestampUs <= mLastFrameTimestampUs) {
             ALOGW("Dropping frame with backward timestamp %lld (last %lld)",
                     (long long)timestampUs, (long long)mLastFrameTimestampUs);
-            releaseOneRecordingFrame(data);
-            return;
+            return true;
         }
         if (timestampUs - mLastFrameTimestampUs > mGlitchDurationThresholdUs) {
             ++mNumGlitches;
@@ -915,18 +1080,161 @@
             if (timestampUs < mStartTimeUs) {
                 // Frame was captured before recording was started
                 // Drop it without updating the statistical data.
-                releaseOneRecordingFrame(data);
-                return;
+                return true;
             }
             mStartTimeUs = timestampUs - mStartTimeUs;
         }
     }
+
+    return false;
+}
+
+void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
+        int32_t msgType __unused, const sp<IMemory> &data) {
+    ALOGV("dataCallbackTimestamp: timestamp %lld us", (long long)timestampUs);
+    Mutex::Autolock autoLock(mLock);
+
+    if (shouldSkipFrameLocked(timestampUs)) {
+        releaseOneRecordingFrame(data);
+        return;
+    }
+
     ++mNumFramesReceived;
 
     CHECK(data != NULL && data->size() > 0);
+    mFramesReceived.push_back(data);
+    int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
+    mFrameTimes.push_back(timeUs);
+    ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64,
+        mStartTimeUs, timeUs);
+    mFrameAvailableCondition.signal();
+}
 
-    // b/28466701
-    adjustIncomingANWBuffer(data.get());
+void CameraSource::releaseRecordingFrameHandle(native_handle_t* handle) {
+    if (mCameraRecordingProxy != nullptr) {
+        mCameraRecordingProxy->releaseRecordingFrameHandle(handle);
+    } else if (mCamera != nullptr) {
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
+        mCamera->releaseRecordingFrameHandle(handle);
+        IPCThreadState::self()->restoreCallingIdentity(token);
+    }
+}
+
+void CameraSource::recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
+                native_handle_t* handle) {
+    ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
+    Mutex::Autolock autoLock(mLock);
+    if (handle == nullptr) return;
+
+    if (shouldSkipFrameLocked(timestampUs)) {
+        releaseRecordingFrameHandle(handle);
+        return;
+    }
+
+    while (mMemoryBases.empty()) {
+        if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
+                TIMED_OUT) {
+            ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
+            releaseRecordingFrameHandle(handle);
+            return;
+        }
+    }
+
+    ++mNumFramesReceived;
+
+    sp<IMemory> data = *mMemoryBases.begin();
+    mMemoryBases.erase(mMemoryBases.begin());
+
+    // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
+    VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
+    metadata->eType = kMetadataBufferTypeNativeHandleSource;
+    metadata->pHandle = handle;
+
+    mFramesReceived.push_back(data);
+    int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
+    mFrameTimes.push_back(timeUs);
+    ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64, mStartTimeUs, timeUs);
+    mFrameAvailableCondition.signal();
+}
+
+CameraSource::BufferQueueListener::BufferQueueListener(const sp<BufferItemConsumer>& consumer,
+        const sp<CameraSource>& cameraSource) {
+    mConsumer = consumer;
+    mConsumer->setFrameAvailableListener(this);
+    mCameraSource = cameraSource;
+}
+
+void CameraSource::BufferQueueListener::onFrameAvailable(const BufferItem& /*item*/) {
+    ALOGV("%s: onFrameAvailable", __FUNCTION__);
+
+    Mutex::Autolock l(mLock);
+
+    if (!mFrameAvailable) {
+        mFrameAvailable = true;
+        mFrameAvailableSignal.signal();
+    }
+}
+
+bool CameraSource::BufferQueueListener::threadLoop() {
+    if (mConsumer == nullptr || mCameraSource == nullptr) {
+        return false;
+    }
+
+    {
+        Mutex::Autolock l(mLock);
+        while (!mFrameAvailable) {
+            if (mFrameAvailableSignal.waitRelative(mLock, kFrameAvailableTimeout) == TIMED_OUT) {
+                return true;
+            }
+        }
+        mFrameAvailable = false;
+    }
+
+    BufferItem buffer;
+    while (mConsumer->acquireBuffer(&buffer, 0) == OK) {
+        mCameraSource->processBufferQueueFrame(buffer);
+    }
+
+    return true;
+}
+
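threadLoop() blocks for up to kFrameAvailableTimeout on the "frame available"
signal and then drains every buffer acquireBuffer() will hand out, so a burst
of frames is handled in one pass. A condensed sketch of that wait-then-drain
shape in standard C++ (the 300 ms timeout is an arbitrary stand-in):

    #include <chrono>
    #include <condition_variable>
    #include <functional>
    #include <mutex>

    // Hypothetical condensation of BufferQueueListener::threadLoop().
    static bool pollOnce(std::mutex &m, std::condition_variable &cv,
                         bool &frameAvailable,
                         const std::function<void()> &drainAll) {
        {
            std::unique_lock<std::mutex> l(m);
            if (!cv.wait_for(l, std::chrono::milliseconds(300),
                             [&] { return frameAvailable; })) {
                return true;  // timed out; loop so requestExit() can stop us
            }
            frameAvailable = false;
        }
        drainAll();  // acquireBuffer() until it stops returning OK
        return true;
    }

    int main() {
        std::mutex m;
        std::condition_variable cv;
        bool avail = true;
        return pollOnce(m, cv, avail, [] {}) ? 0 : 1;
    }
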
+void CameraSource::processBufferQueueFrame(BufferItem& buffer) {
+    Mutex::Autolock autoLock(mLock);
+
+    int64_t timestampUs = buffer.mTimestamp / 1000;
+    if (shouldSkipFrameLocked(timestampUs)) {
+        mVideoBufferConsumer->releaseBuffer(buffer);
+        return;
+    }
+
+    while (mMemoryBases.empty()) {
+        if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
+                TIMED_OUT) {
+            ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
+            mVideoBufferConsumer->releaseBuffer(buffer);
+            return;
+        }
+    }
+
+    ++mNumFramesReceived;
+
+    // Find an available memory slot to store the buffer as VideoNativeMetadata.
+    sp<IMemory> data = *mMemoryBases.begin();
+    mMemoryBases.erase(mMemoryBases.begin());
+
+    ssize_t offset;
+    size_t size;
+    sp<IMemoryHeap> heap = data->getMemory(&offset, &size);
+    VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
+        (uint8_t*)heap->getBase() + offset);
+    memset(payload, 0, sizeof(VideoNativeMetadata));
+    payload->eType = kMetadataBufferTypeANWBuffer;
+    payload->pBuffer = buffer.mGraphicBuffer->getNativeBuffer();
+    payload->nFenceFd = -1;
+
+    // Add the mapping so we can find the corresponding buffer item to release to the buffer queue
+    // when the encoder returns the native window buffer.
+    mReceivedBufferItemMap.add(payload->pBuffer, buffer);
 
     mFramesReceived.push_back(data);
     int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
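Each acquired BufferItem is published to the encoder as a VideoNativeMetadata
record written into one of the pooled IMemory slots. Illustrative stand-ins
for the fields populated above (the real struct and type tag live in
media/hardware/HardwareAPI.h; the tag value here is an assumption):

    #include <cstring>

    enum { kTypeANWBufferSketch = 2 };  // stand-in for kMetadataBufferTypeANWBuffer

    struct NativeMetadataSketch {       // simplified field types
        int   eType;     // metadata type tag
        void *pBuffer;   // ANativeWindowBuffer* from the acquired BufferItem
        int   nFenceFd;  // -1: the encoder need not wait on a fence
    };

    static void fillPayload(NativeMetadataSketch *payload, void *anwBuffer) {
        std::memset(payload, 0, sizeof(*payload));
        payload->eType = kTypeANWBufferSketch;
        payload->pBuffer = anwBuffer;
        payload->nFenceFd = -1;
    }
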
@@ -936,26 +1244,18 @@
     mFrameAvailableCondition.signal();
 }
 
-bool CameraSource::isMetaDataStoredInVideoBuffers() const {
-    ALOGV("isMetaDataStoredInVideoBuffers");
-    return mIsMetaDataStoredInVideoBuffers;
-}
+MetadataBufferType CameraSource::metaDataStoredInVideoBuffers() const {
+    ALOGV("metaDataStoredInVideoBuffers");
 
-void CameraSource::adjustIncomingANWBuffer(IMemory* data) {
-    VideoNativeMetadata *payload =
-            reinterpret_cast<VideoNativeMetadata*>(data->pointer());
-    if (payload->eType == kMetadataBufferTypeANWBuffer) {
-        payload->pBuffer = (ANativeWindowBuffer*)(((uint8_t*)payload->pBuffer) +
-                ICameraRecordingProxy::getCommonBaseAddress());
-    }
-}
-
-void CameraSource::adjustOutgoingANWBuffer(IMemory* data) {
-    VideoNativeMetadata *payload =
-            reinterpret_cast<VideoNativeMetadata*>(data->pointer());
-    if (payload->eType == kMetadataBufferTypeANWBuffer) {
-        payload->pBuffer = (ANativeWindowBuffer*)(((uint8_t*)payload->pBuffer) -
-                ICameraRecordingProxy::getCommonBaseAddress());
+    // Output buffers will contain metadata if the camera sends us buffers in metadata mode or
+    // via the buffer queue.
+    switch (mVideoBufferMode) {
+        case hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA:
+            return kMetadataBufferTypeNativeHandleSource;
+        case hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE:
+            return kMetadataBufferTypeANWBuffer;
+        default:
+            return kMetadataBufferTypeInvalid;
     }
 }
 
@@ -968,6 +1268,11 @@
     mSource->dataCallbackTimestamp(timestamp / 1000, msgType, dataPtr);
 }
 
+void CameraSource::ProxyListener::recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
+        native_handle_t* handle) {
+    mSource->recordingFrameHandleCallbackTimestamp(timestamp / 1000, handle);
+}
+
 void CameraSource::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
     ALOGI("Camera recording proxy died");
 }
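
For readers unfamiliar with the consumer-thread pattern above: onFrameAvailable() coalesces
any number of producer wakeups into a single flag, and each pass of threadLoop() drains every
buffer that is ready rather than exactly one. A minimal sketch of the same shape in portable
C++ (FrameLoop, signalFrame, and loopOnce are invented names; this is not the Android
Thread/BufferItemConsumer API):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <queue>

    class FrameLoop {
    public:
        // Producer side, analogous to onFrameAvailable(): queue the frame,
        // raise the flag, and signal once.
        void signalFrame(int frame) {
            std::lock_guard<std::mutex> l(mLock);
            mFrames.push(frame);
            mFrameAvailable = true;
            mCond.notify_one();
        }

        // One iteration, analogous to threadLoop(); returning true keeps
        // the thread running, false exits it.
        bool loopOnce() {
            std::unique_lock<std::mutex> l(mLock);
            if (!mCond.wait_for(l, std::chrono::milliseconds(300),
                                [this] { return mFrameAvailable; })) {
                return true;  // timed out; re-check exit conditions next pass
            }
            mFrameAvailable = false;
            l.unlock();
            // Drain everything that is ready, like the acquireBuffer() loop.
            for (;;) {
                int frame;
                {
                    std::lock_guard<std::mutex> g(mLock);
                    if (mFrames.empty()) break;
                    frame = mFrames.front();
                    mFrames.pop();
                }
                process(frame);  // stand-in for processBufferQueueFrame()
            }
            return true;
        }

    private:
        void process(int /*frame*/) {}

        std::mutex mLock;
        std::condition_variable mCond;
        std::queue<int> mFrames;
        bool mFrameAvailable = false;
    };
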
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 0acd9d0..390c556 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -35,11 +35,12 @@
 
 // static
 CameraSourceTimeLapse *CameraSourceTimeLapse::CreateFromCamera(
-        const sp<ICamera> &camera,
+        const sp<hardware::ICamera> &camera,
         const sp<ICameraRecordingProxy> &proxy,
         int32_t cameraId,
         const String16& clientName,
         uid_t clientUid,
+        pid_t clientPid,
         Size videoSize,
         int32_t videoFrameRate,
         const sp<IGraphicBufferProducer>& surface,
@@ -48,7 +49,7 @@
 
     CameraSourceTimeLapse *source = new
             CameraSourceTimeLapse(camera, proxy, cameraId,
-                clientName, clientUid,
+                clientName, clientUid, clientPid,
                 videoSize, videoFrameRate, surface,
                 timeBetweenFrameCaptureUs,
                 storeMetaDataInVideoBuffers);
@@ -63,17 +64,18 @@
 }
 
 CameraSourceTimeLapse::CameraSourceTimeLapse(
-        const sp<ICamera>& camera,
+        const sp<hardware::ICamera>& camera,
         const sp<ICameraRecordingProxy>& proxy,
         int32_t cameraId,
         const String16& clientName,
         uid_t clientUid,
+        pid_t clientPid,
         Size videoSize,
         int32_t videoFrameRate,
         const sp<IGraphicBufferProducer>& surface,
         int64_t timeBetweenFrameCaptureUs,
         bool storeMetaDataInVideoBuffers)
-      : CameraSource(camera, proxy, cameraId, clientName, clientUid,
+      : CameraSource(camera, proxy, cameraId, clientName, clientUid, clientPid,
                 videoSize, videoFrameRate, surface,
                 storeMetaDataInVideoBuffers),
       mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate),
@@ -306,4 +308,19 @@
     CameraSource::dataCallbackTimestamp(timestampUs, msgType, data);
 }
 
+void CameraSourceTimeLapse::recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
+            native_handle_t* handle) {
+    ALOGV("recordingFrameHandleCallbackTimestamp");
+    mSkipCurrentFrame = skipFrameAndModifyTimeStamp(&timestampUs);
+    CameraSource::recordingFrameHandleCallbackTimestamp(timestampUs, handle);
+}
+
+void CameraSourceTimeLapse::processBufferQueueFrame(BufferItem& buffer) {
+    ALOGV("processBufferQueueFrame");
+    int64_t timestampUs = buffer.mTimestamp / 1000;
+    mSkipCurrentFrame = skipFrameAndModifyTimeStamp(&timestampUs);
+    buffer.mTimestamp = timestampUs * 1000;
+    CameraSource::processBufferQueueFrame(buffer);
+}
+
 }  // namespace android
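
Both overrides above funnel timestamps through skipFrameAndModifyTimeStamp() before delegating
to CameraSource. As a rough sketch of what a helper like that has to do for time lapse (the
names and state below are hypothetical; the real logic lives in CameraSourceTimeLapse and
handles more cases): drop frames that arrive sooner than the capture interval, and restamp the
kept ones at the playback frame interval so the clip plays back at normal speed.

    #include <cstdint>

    struct TimeLapseRetimer {
        int64_t timeBetweenCapturesUs;     // e.g. 1000000 for one frame per second
        int64_t timeBetweenVideoFramesUs;  // e.g. 33333 for 30 fps output
        int64_t lastCaptureUs = -1;
        int64_t nextOutputUs = 0;

        // Returns true if the frame should be skipped; otherwise rewrites
        // *timestampUs, mirroring the mSkipCurrentFrame logic above.
        bool skipOrRestamp(int64_t *timestampUs) {
            if (lastCaptureUs >= 0 &&
                    *timestampUs - lastCaptureUs < timeBetweenCapturesUs) {
                return true;  // too soon after the last kept frame
            }
            lastCaptureUs = *timestampUs;
            *timestampUs = nextOutputUs;
            nextOutputUs += timeBetweenVideoFramesUs;
            return false;
        }
    };
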
diff --git a/media/libstagefright/ClockEstimator.cpp b/media/libstagefright/ClockEstimator.cpp
deleted file mode 100644
index 34d1e42..0000000
--- a/media/libstagefright/ClockEstimator.cpp
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
-**
-** Copyright 2014, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ClockEstimator"
-#include <utils/Log.h>
-
-#include <math.h>
-#include <media/stagefright/ClockEstimator.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-
-namespace android {
-
-WindowedLinearFitEstimator::WindowedLinearFitEstimator(
-        size_t headLength, double headFactor, size_t mainLength, double tailFactor)
-    : mHeadFactorInv(1. / headFactor),
-      mTailFactor(tailFactor),
-      mHistoryLength(mainLength + headLength),
-      mHeadLength(headLength) {
-    reset();
-    mXHistory.resize(mHistoryLength);
-    mYHistory.resize(mHistoryLength);
-    mFirstWeight = pow(headFactor, mHeadLength);
-}
-
-WindowedLinearFitEstimator::LinearFit::LinearFit() {
-    reset();
-}
-
-void WindowedLinearFitEstimator::LinearFit::reset() {
-    mX = mXX = mY = mYY = mXY = mW = 0.;
-}
-
-double WindowedLinearFitEstimator::LinearFit::size() const {
-    double s = mW * mW + mX * mX + mY * mY + mXX * mXX + mXY * mXY + mYY * mYY;
-    if (s > 1e72) {
-        // 1e72 corresponds to clock monotonic time of about 8 years
-        ALOGW("estimator is overflowing: w=%g x=%g y=%g xx=%g xy=%g yy=%g",
-              mW, mX, mY, mXX, mXY, mYY);
-    }
-    return s;
-}
-
-void WindowedLinearFitEstimator::LinearFit::add(double x, double y, double w) {
-    mW += w;
-    mX += w * x;
-    mY += w * y;
-    mXX += w * x * x;
-    mXY += w * x * y;
-    mYY += w * y * y;
-}
-
-void WindowedLinearFitEstimator::LinearFit::combine(const LinearFit &lf) {
-    mW += lf.mW;
-    mX += lf.mX;
-    mY += lf.mY;
-    mXX += lf.mXX;
-    mXY += lf.mXY;
-    mYY += lf.mYY;
-}
-
-void WindowedLinearFitEstimator::LinearFit::scale(double w) {
-    mW *= w;
-    mX *= w;
-    mY *= w;
-    mXX *= w;
-    mXY *= w;
-    mYY *= w;
-}
-
-double WindowedLinearFitEstimator::LinearFit::interpolate(double x) {
-    double div = mW * mXX - mX * mX;
-    if (fabs(div) < 1e-5 * mW * mW) {
-        // this only should happen on the first value
-        return x;
-        // assuming a = 1, we could also return x + (mY - mX) / mW;
-    }
-    double a_div = (mW * mXY - mX * mY);
-    double b_div = (mXX * mY - mX * mXY);
-    ALOGV("a=%.4g b=%.4g in=%g out=%g",
-            a_div / div, b_div / div, x, (a_div * x + b_div) / div);
-    return (a_div * x + b_div) / div;
-}
-
-double WindowedLinearFitEstimator::estimate(double x, double y) {
-    /*
-     * TODO: We could update the head by adding the new sample to it
-     * and amplifying it, but this approach can lead to unbounded
-     * error. Instead, we recalculate the head at each step, which
-     * is computationally more expensive. We could balance the two
-     * methods by recalculating just before the error becomes
-     * significant.
-     */
-    const bool update_head = false;
-    if (update_head) {
-        // add new sample to the head
-        mHead.scale(mHeadFactorInv); // amplify head
-        mHead.add(x, y, mFirstWeight);
-    }
-
-    /*
-     * TRICKY: place elements into the circular buffer at decreasing
-     * indices, so that we can access past elements by addition
-     * (thereby avoiding potentially negative indices.)
-     */
-    if (mNumSamples >= mHeadLength) {
-        // move last head sample from head to the main window
-        size_t lastHeadIx = (mSampleIx + mHeadLength) % mHistoryLength;
-        if (update_head) {
-            mHead.add(mXHistory[lastHeadIx], mYHistory[lastHeadIx], -1.); // remove
-        }
-        mMain.add(mXHistory[lastHeadIx], mYHistory[lastHeadIx], 1.);
-        if (mNumSamples >= mHistoryLength) {
-            // move last main sample from main window to tail
-            mMain.add(mXHistory[mSampleIx], mYHistory[mSampleIx], -1.); // remove
-            mTail.add(mXHistory[mSampleIx], mYHistory[mSampleIx], 1.);
-            mTail.scale(mTailFactor); // attenuate tail
-        }
-    }
-
-    mXHistory.editItemAt(mSampleIx) = x;
-    mYHistory.editItemAt(mSampleIx) = y;
-    if (mNumSamples < mHistoryLength) {
-        ++mNumSamples;
-    }
-
-    // recalculate head unless we were using the update method
-    if (!update_head) {
-        mHead.reset();
-        double w = mFirstWeight;
-        for (size_t headIx = 0; headIx < mHeadLength && headIx < mNumSamples; ++headIx) {
-            size_t ix = (mSampleIx + headIx) % mHistoryLength;
-            mHead.add(mXHistory[ix], mYHistory[ix], w);
-            w *= mHeadFactorInv;
-        }
-    }
-
-    if (mSampleIx > 0) {
-        --mSampleIx;
-    } else {
-        mSampleIx = mHistoryLength - 1;
-    }
-
-    // return estimation result
-    LinearFit total;
-    total.combine(mHead);
-    total.combine(mMain);
-    total.combine(mTail);
-    return total.interpolate(x);
-}
-
-void WindowedLinearFitEstimator::reset() {
-    mHead.reset();
-    mMain.reset();
-    mTail.reset();
-    mNumSamples = 0;
-    mSampleIx = mHistoryLength - 1;
-}
-
-}; // namespace android
-
-
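
For the record, the estimator deleted above evaluates a closed-form weighted least-squares
line from six running sums. With W = sum(w), X = sum(w*x), Y = sum(w*y), XX = sum(w*x*x) and
XY = sum(w*x*y), the fit y = a*x + b computed by LinearFit::interpolate() is
a = (W*XY - X*Y) / (W*XX - X*X) and b = (XX*Y - X*XY) / (W*XX - X*X). The same computation as
a standalone function:

    #include <cmath>

    double fitAndEvaluate(double W, double X, double Y,
                          double XX, double XY, double x) {
        double div = W * XX - X * X;
        if (std::fabs(div) < 1e-5 * W * W) {
            return x;  // degenerate fit (effectively one sample): identity
        }
        double a = (W * XY - X * Y) / div;
        double b = (XX * Y - X * XY) / div;
        return a * x + b;
    }
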
diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp
index e2bc89c..d36ac65 100644
--- a/media/libstagefright/DRMExtractor.cpp
+++ b/media/libstagefright/DRMExtractor.cpp
@@ -35,7 +35,7 @@
 
 class DRMSource : public MediaSource {
 public:
-    DRMSource(const sp<MediaSource> &mediaSource,
+    DRMSource(const sp<IMediaSource> &mediaSource,
             const sp<DecryptHandle> &decryptHandle,
             DrmManagerClient *managerClient,
             int32_t trackId, DrmBuffer *ipmpBox);
@@ -50,7 +50,7 @@
     virtual ~DRMSource();
 
 private:
-    sp<MediaSource> mOriginalMediaSource;
+    sp<IMediaSource> mOriginalMediaSource;
     sp<DecryptHandle> mDecryptHandle;
     DrmManagerClient* mDrmManagerClient;
     size_t mTrackId;
@@ -64,7 +64,7 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-DRMSource::DRMSource(const sp<MediaSource> &mediaSource,
+DRMSource::DRMSource(const sp<IMediaSource> &mediaSource,
         const sp<DecryptHandle> &decryptHandle,
         DrmManagerClient *managerClient,
         int32_t trackId, DrmBuffer *ipmpBox)
@@ -257,8 +257,8 @@
     return mOriginalExtractor->countTracks();
 }
 
-sp<MediaSource> DRMExtractor::getTrack(size_t index) {
-    sp<MediaSource> originalMediaSource = mOriginalExtractor->getTrack(index);
+sp<IMediaSource> DRMExtractor::getTrack(size_t index) {
+    sp<IMediaSource> originalMediaSource = mOriginalExtractor->getTrack(index);
     originalMediaSource->getFormat()->setInt32(kKeyIsDRM, 1);
 
     int32_t trackID;
@@ -268,8 +268,9 @@
     ipmpBox.data = mOriginalExtractor->getDrmTrackInfo(trackID, &(ipmpBox.length));
     CHECK(ipmpBox.length > 0);
 
-    return new DRMSource(originalMediaSource, mDecryptHandle, mDrmManagerClient,
-            trackID, &ipmpBox);
+    return interface_cast<IMediaSource>(
+            new DRMSource(originalMediaSource, mDecryptHandle, mDrmManagerClient,
+            trackID, &ipmpBox));
 }
 
 sp<MetaData> DRMExtractor::getTrackMetaData(size_t index, uint32_t flags) {
diff --git a/media/libstagefright/DataConverter.cpp b/media/libstagefright/DataConverter.cpp
new file mode 100644
index 0000000..aea47f3
--- /dev/null
+++ b/media/libstagefright/DataConverter.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DataConverter"
+
+#include "include/DataConverter.h"
+
+#include <audio_utils/primitives.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+namespace android {
+
+status_t DataConverter::convert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+    CHECK(source->base() != target->base());
+    size_t size = targetSize(source->size());
+    status_t err = OK;
+    if (size > target->capacity()) {
+        ALOGE("data size (%zu) is greater than buffer capacity (%zu)",
+                size,          // this is the data received/to be converted
+                target->capacity()); // this is our buffer size
+        err = FAILED_TRANSACTION;
+    } else {
+        err = safeConvert(source, target);
+    }
+    target->setRange(0, err == OK ? size : 0);
+    return err;
+}
+
+status_t DataConverter::safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+    memcpy(target->base(), source->data(), source->size());
+    return OK;
+}
+
+size_t DataConverter::sourceSize(size_t targetSize) {
+    return targetSize;
+}
+
+size_t DataConverter::targetSize(size_t sourceSize) {
+    return sourceSize;
+}
+
+DataConverter::~DataConverter() { }
+
+
+size_t SampleConverterBase::sourceSize(size_t targetSize) {
+    size_t numSamples = targetSize / mTargetSampleSize;
+    if (numSamples > SIZE_MAX / mSourceSampleSize) {
+        ALOGW("limiting source size due to overflow (%zu*%zu/%zu)",
+                targetSize, mSourceSampleSize, mTargetSampleSize);
+        return SIZE_MAX;
+    }
+    return numSamples * mSourceSampleSize;
+}
+
+size_t SampleConverterBase::targetSize(size_t sourceSize) {
+    // we round up on conversion
+    size_t numSamples = divUp(sourceSize, (size_t)mSourceSampleSize);
+    if (numSamples > SIZE_MAX / mTargetSampleSize) {
+        ALOGW("limiting target size due to overflow (%zu*%zu/%zu)",
+                sourceSize, mTargetSampleSize, mSourceSampleSize);
+        return SIZE_MAX;
+    }
+    return numSamples * mTargetSampleSize;
+}
+
+
+static size_t getAudioSampleSize(AudioEncoding e) {
+    switch (e) {
+        case kAudioEncodingPcm16bit: return 2;
+        case kAudioEncodingPcm8bit:  return 1;
+        case kAudioEncodingPcmFloat: return 4;
+        default: return 0;
+    }
+}
+
+
+// static
+AudioConverter* AudioConverter::Create(AudioEncoding source, AudioEncoding target) {
+    uint32_t sourceSampleSize = getAudioSampleSize(source);
+    uint32_t targetSampleSize = getAudioSampleSize(target);
+    if (sourceSampleSize && targetSampleSize && sourceSampleSize != targetSampleSize) {
+        return new AudioConverter(source, sourceSampleSize, target, targetSampleSize);
+    }
+    return NULL;
+}
+
+status_t AudioConverter::safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt) {
+    if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcm16bit) {
+        memcpy_to_u8_from_i16((uint8_t*)tgt->base(), (const int16_t*)src->data(), src->size() / 2);
+    } else if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcmFloat) {
+        memcpy_to_u8_from_float((uint8_t*)tgt->base(), (const float*)src->data(), src->size() / 4);
+    } else if (mTo == kAudioEncodingPcm16bit && mFrom == kAudioEncodingPcm8bit) {
+        memcpy_to_i16_from_u8((int16_t*)tgt->base(), (const uint8_t*)src->data(), src->size());
+    } else if (mTo == kAudioEncodingPcm16bit && mFrom == kAudioEncodingPcmFloat) {
+        memcpy_to_i16_from_float((int16_t*)tgt->base(), (const float*)src->data(), src->size() / 4);
+    } else if (mTo == kAudioEncodingPcmFloat && mFrom == kAudioEncodingPcm8bit) {
+        memcpy_to_float_from_u8((float*)tgt->base(), (const uint8_t*)src->data(), src->size());
+    } else if (mTo == kAudioEncodingPcmFloat && mFrom == kAudioEncodingPcm16bit) {
+        memcpy_to_float_from_i16((float*)tgt->base(), (const int16_t*)src->data(), src->size() / 2);
+    } else {
+        return INVALID_OPERATION;
+    }
+    return OK;
+}
+
+} // namespace android
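
The audio_utils primitives called from safeConvert() do the per-sample scaling, and the size
divisions (/ 2 for 16-bit sources, / 4 for float) turn a byte count into a sample count. As an
illustration of the kind of conversion involved (this is a sketch, not the library's exact
code), widening 16-bit PCM to float scales each sample into [-1.0, 1.0):

    #include <cstddef>
    #include <cstdint>

    static void pcm16ToFloat(float *dst, const int16_t *src, size_t count) {
        const float kScale = 1.0f / 32768.0f;  // 1 / 2^15
        for (size_t i = 0; i < count; ++i) {
            dst[i] = src[i] * kScale;
        }
    }
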
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 5020c6c..163a527 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -48,6 +48,8 @@
 
 #include <cutils/properties.h>
 
+#include <private/android_filesystem_config.h>
+
 namespace android {
 
 bool DataSource::getUInt16(off64_t offset, uint16_t *x) {
@@ -173,7 +175,10 @@
     RegisterSniffer_l(SniffMP3);
     RegisterSniffer_l(SniffAAC);
     RegisterSniffer_l(SniffMPEG2PS);
-    RegisterSniffer_l(SniffWVM);
+    if (getuid() == AID_MEDIA) {
+        // WVM only in the media server process
+        RegisterSniffer_l(SniffWVM);
+    }
     RegisterSniffer_l(SniffMidi);
 
     char value[PROPERTY_VALUE_MAX];
diff --git a/media/libstagefright/DataURISource.cpp b/media/libstagefright/DataURISource.cpp
index 2c39314..2a61c3a 100644
--- a/media/libstagefright/DataURISource.cpp
+++ b/media/libstagefright/DataURISource.cpp
@@ -42,7 +42,8 @@
         AString encoded(commaPos + 1);
 
         // Strip CR and LF...
-        for (size_t i = encoded.size(); i-- > 0;) {
+        for (size_t i = encoded.size(); i > 0;) {
+            i--;
             if (encoded.c_str()[i] == '\r' || encoded.c_str()[i] == '\n') {
                 encoded.erase(i, 1);
             }
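
The rewritten loop still walks the string backward, which is what makes in-place erasure safe:
removing the character at index i only shifts characters after i, so the indices still to be
visited are unaffected. The same idiom over a std::string:

    #include <string>

    static void stripCrLf(std::string &s) {
        for (size_t i = s.size(); i > 0;) {
            --i;  // decrement first so i is a valid index and never wraps
            if (s[i] == '\r' || s[i] == '\n') {
                s.erase(i, 1);
            }
        }
    }
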
diff --git a/media/libstagefright/ESDS.cpp b/media/libstagefright/ESDS.cpp
index 8fbb57c..c31720d 100644
--- a/media/libstagefright/ESDS.cpp
+++ b/media/libstagefright/ESDS.cpp
@@ -18,6 +18,8 @@
 #define LOG_TAG "ESDS"
 #include <utils/Log.h>
 
+#include <media/stagefright/Utils.h>
+
 #include "include/ESDS.h"
 
 #include <string.h>
@@ -194,12 +196,25 @@
     return err;
 }
 
+status_t ESDS::getBitRate(uint32_t *brateMax, uint32_t *brateAvg) const {
+    if (mInitCheck != OK) {
+        return mInitCheck;
+    }
+
+    *brateMax = mBitRateMax;
+    *brateAvg = mBitRateAvg;
+
+    return OK;
+}
+
 status_t ESDS::parseDecoderConfigDescriptor(size_t offset, size_t size) {
     if (size < 13) {
         return ERROR_MALFORMED;
     }
 
     mObjectTypeIndication = mData[offset];
+    mBitRateMax = U32_AT(mData + offset + 5);
+    mBitRateAvg = U32_AT(mData + offset + 9);
 
     offset += 13;
     size -= 13;
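
The bitrate fields surfaced by getBitRate() come from the DecoderConfigDescriptor parsed
above, whose fixed 13-byte prefix (ISO/IEC 14496-1) places objectTypeIndication at offset 0,
maxBitrate at offset 5, and avgBitrate at offset 9, both stored big-endian; that is why the
parser insists on size >= 13 and reads with U32_AT() at those offsets. A standalone
equivalent of that big-endian read:

    #include <cstdint>

    static uint32_t be32(const uint8_t *p) {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
                | ((uint32_t)p[2] << 8) | (uint32_t)p[3];
    }
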
diff --git a/media/libstagefright/FLACExtractor.cpp b/media/libstagefright/FLACExtractor.cpp
index 89a91f7..13b66f3 100644
--- a/media/libstagefright/FLACExtractor.cpp
+++ b/media/libstagefright/FLACExtractor.cpp
@@ -615,6 +615,7 @@
             mTrackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
             mTrackMetadata->setInt32(kKeyChannelCount, getChannels());
             mTrackMetadata->setInt32(kKeySampleRate, getSampleRate());
+            mTrackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
             // sample rate is non-zero, so division by zero not possible
             mTrackMetadata->setInt64(kKeyDuration,
                     (getTotalSamples() * 1000000LL) / getSampleRate());
@@ -807,7 +808,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-sp<MediaSource> FLACExtractor::getTrack(size_t index)
+sp<IMediaSource> FLACExtractor::getTrack(size_t index)
 {
     if (mInitCheck != OK || index > 0) {
         return NULL;
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index 565f156..5b92f91 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -20,6 +20,7 @@
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/FileSource.h>
+#include <media/stagefright/Utils.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <sys/types.h>
@@ -32,12 +33,17 @@
     : mFd(-1),
       mOffset(0),
       mLength(-1),
+      mName("<null>"),
       mDecryptHandle(NULL),
       mDrmManagerClient(NULL),
       mDrmBufOffset(0),
       mDrmBufSize(0),
       mDrmBuf(NULL){
 
+    if (filename) {
+        mName = String8::format("FileSource(%s)", filename);
+    }
+    ALOGV("%s", filename);
     mFd = open(filename, O_LARGEFILE | O_RDONLY);
 
     if (mFd >= 0) {
@@ -51,18 +57,51 @@
     : mFd(fd),
       mOffset(offset),
       mLength(length),
+      mName("<null>"),
       mDecryptHandle(NULL),
       mDrmManagerClient(NULL),
       mDrmBufOffset(0),
       mDrmBufSize(0),
-      mDrmBuf(NULL){
-    CHECK(offset >= 0);
-    CHECK(length >= 0);
+      mDrmBuf(NULL) {
+    ALOGV("fd=%d (%s), offset=%lld, length=%lld",
+            fd, nameForFd(fd).c_str(), (long long) offset, (long long) length);
+
+    if (mOffset < 0) {
+        mOffset = 0;
+    }
+    if (mLength < 0) {
+        mLength = 0;
+    }
+    if (mLength > INT64_MAX - mOffset) {
+        mLength = INT64_MAX - mOffset;
+    }
+    struct stat s;
+    if (fstat(fd, &s) == 0) {
+        if (mOffset > s.st_size) {
+            mOffset = s.st_size;
+            mLength = 0;
+        }
+        if (mOffset + mLength > s.st_size) {
+            mLength = s.st_size - mOffset;
+        }
+    }
+    if (mOffset != offset || mLength != length) {
+        ALOGW("offset/length adjusted from %lld/%lld to %lld/%lld",
+                (long long) offset, (long long) length,
+                (long long) mOffset, (long long) mLength);
+    }
+
+    mName = String8::format(
+            "FileSource(fd(%s), %lld, %lld)",
+            nameForFd(fd).c_str(),
+            (long long) mOffset,
+            (long long) mLength);
+
 }
 
 FileSource::~FileSource() {
     if (mFd >= 0) {
-        close(mFd);
+        ::close(mFd);
         mFd = -1;
     }
 
@@ -99,8 +138,8 @@
         if (offset >= mLength) {
             return 0;  // read beyond EOF.
         }
-        int64_t numAvailable = mLength - offset;
-        if ((int64_t)size > numAvailable) {
+        uint64_t numAvailable = mLength - offset;
+        if ((uint64_t)size > numAvailable) {
             size = numAvailable;
         }
     }
@@ -166,7 +205,7 @@
     }
 
     if (mDrmBuf != NULL && mDrmBufSize > 0 && (offset + mOffset) >= mDrmBufOffset
-            && (offset + mOffset + size) <= (mDrmBufOffset + mDrmBufSize)) {
+            && (offset + mOffset + size) <= static_cast<size_t>(mDrmBufOffset + mDrmBufSize)) {
         /* Use buffered data */
         memcpy(data, (void*)(mDrmBuf+(offset+mOffset-mDrmBufOffset)), size);
         return size;
@@ -177,7 +216,7 @@
                 DRM_CACHE_SIZE, offset + mOffset);
         if (mDrmBufSize > 0) {
             int64_t dataRead = 0;
-            dataRead = size > mDrmBufSize ? mDrmBufSize : size;
+            dataRead = size > static_cast<size_t>(mDrmBufSize) ? mDrmBufSize : size;
             memcpy(data, (void*)mDrmBuf, dataRead);
             return dataRead;
         } else {
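
The constructor change above replaces two process-killing CHECKs with sanitization, so a bad
(offset, length) pair is clamped instead of aborting mediaserver. Condensed into a single
hypothetical helper (assuming fstat() succeeded and fileSize holds st_size):

    #include <algorithm>
    #include <cstdint>

    static void clampWindow(int64_t fileSize, int64_t *offset, int64_t *length) {
        // Pin the offset into [0, fileSize], then fit the length to what
        // remains of the file past that offset.
        *offset = std::min(std::max<int64_t>(0, *offset), fileSize);
        *length = std::max<int64_t>(0, *length);
        if (*length > fileSize - *offset) {
            *length = fileSize - *offset;
        }
    }
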
diff --git a/media/libstagefright/HTTPBase.cpp b/media/libstagefright/HTTPBase.cpp
index 068a77f..0f24329 100644
--- a/media/libstagefright/HTTPBase.cpp
+++ b/media/libstagefright/HTTPBase.cpp
@@ -38,6 +38,7 @@
       mPrevBandwidthMeasureTimeUs(0),
       mPrevEstimatedBandWidthKbps(0),
       mBandWidthCollectFreqMs(5000) {
+    mName = String8("HTTPBase(<disconnected>)");
 }
 
 void HTTPBase::addBandwidthMeasurement(
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
new file mode 100644
index 0000000..718710a
--- /dev/null
+++ b/media/libstagefright/HevcUtils.cpp
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "HevcUtils"
+
+#include <cstring>
+#include <utility>
+
+#include "include/HevcUtils.h"
+#include "include/avc_utils.h"
+
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+static const uint8_t kHevcNalUnitTypes[5] = {
+    kHevcNalUnitTypeVps,
+    kHevcNalUnitTypeSps,
+    kHevcNalUnitTypePps,
+    kHevcNalUnitTypePrefixSei,
+    kHevcNalUnitTypeSuffixSei,
+};
+
+HevcParameterSets::HevcParameterSets()
+    : mInfo(kInfoNone) {
+}
+
+status_t HevcParameterSets::addNalUnit(const uint8_t* data, size_t size) {
+    uint8_t nalUnitType = (data[0] >> 1) & 0x3f;
+    status_t err = OK;
+    switch (nalUnitType) {
+        case 32:  // VPS
+            err = parseVps(data + 2, size - 2);
+            break;
+        case 33:  // SPS
+            err = parseSps(data + 2, size - 2);
+            break;
+        case 34:  // PPS
+            err = parsePps(data + 2, size - 2);
+            break;
+        case 39:  // Prefix SEI
+        case 40:  // Suffix SEI
+            // Ignore
+            break;
+        default:
+            ALOGE("Unrecognized NAL unit type.");
+            return ERROR_MALFORMED;
+    }
+
+    if (err != OK) {
+        return err;
+    }
+
+    sp<ABuffer> buffer = ABuffer::CreateAsCopy(data, size);
+    buffer->setInt32Data(nalUnitType);
+    mNalUnits.push(buffer);
+    return OK;
+}
+
+template <typename T>
+static bool findParam(uint32_t key, T *param,
+        KeyedVector<uint32_t, uint64_t> &params) {
+    CHECK(param);
+    if (params.indexOfKey(key) < 0) {
+        return false;
+    }
+    *param = (T) params[key];
+    return true;
+}
+
+bool HevcParameterSets::findParam8(uint32_t key, uint8_t *param) {
+    return findParam(key, param, mParams);
+}
+
+bool HevcParameterSets::findParam16(uint32_t key, uint16_t *param) {
+    return findParam(key, param, mParams);
+}
+
+bool HevcParameterSets::findParam32(uint32_t key, uint32_t *param) {
+    return findParam(key, param, mParams);
+}
+
+bool HevcParameterSets::findParam64(uint32_t key, uint64_t *param) {
+    return findParam(key, param, mParams);
+}
+
+size_t HevcParameterSets::getNumNalUnitsOfType(uint8_t type) {
+    size_t num = 0;
+    for (size_t i = 0; i < mNalUnits.size(); ++i) {
+        if (getType(i) == type) {
+            ++num;
+        }
+    }
+    return num;
+}
+
+uint8_t HevcParameterSets::getType(size_t index) {
+    CHECK_LT(index, mNalUnits.size());
+    return mNalUnits[index]->int32Data();
+}
+
+size_t HevcParameterSets::getSize(size_t index) {
+    CHECK_LT(index, mNalUnits.size());
+    return mNalUnits[index]->size();
+}
+
+bool HevcParameterSets::write(size_t index, uint8_t* dest, size_t size) {
+    CHECK_LT(index, mNalUnits.size());
+    const sp<ABuffer>& nalUnit = mNalUnits[index];
+    if (size < nalUnit->size()) {
+        ALOGE("dest buffer size too small: %zu vs. %zu to be written",
+                size, nalUnit->size());
+        return false;
+    }
+    memcpy(dest, nalUnit->data(), nalUnit->size());
+    return true;
+}
+
+status_t HevcParameterSets::parseVps(const uint8_t* data, size_t size) {
+    // See Rec. ITU-T H.265 v3 (04/2015) Chapter 7.3.2.1 for reference
+    NALBitReader reader(data, size);
+    // Skip vps_video_parameter_set_id
+    reader.skipBits(4);
+    // Skip vps_base_layer_internal_flag
+    reader.skipBits(1);
+    // Skip vps_base_layer_available_flag
+    reader.skipBits(1);
+    // Skip vps_max_layers_minus_1
+    reader.skipBits(6);
+    // Skip vps_temporal_id_nesting_flag
+    reader.skipBits(1);
+    // Skip reserved
+    reader.skipBits(16);
+
+    if (reader.atLeastNumBitsLeft(96)) {
+        mParams.add(kGeneralProfileSpace, reader.getBits(2));
+        mParams.add(kGeneralTierFlag, reader.getBits(1));
+        mParams.add(kGeneralProfileIdc, reader.getBits(5));
+        mParams.add(kGeneralProfileCompatibilityFlags, reader.getBits(32));
+        mParams.add(
+                kGeneralConstraintIndicatorFlags,
+                ((uint64_t)reader.getBits(16) << 32) | reader.getBits(32));
+        mParams.add(kGeneralLevelIdc, reader.getBits(8));
+        // 96 bits total for general profile.
+    } else {
+        reader.skipBits(96);
+    }
+
+    return reader.overRead() ? ERROR_MALFORMED : OK;
+}
+
+status_t HevcParameterSets::parseSps(const uint8_t* data, size_t size) {
+    // See Rec. ITU-T H.265 v3 (04/2015) Chapter 7.3.2.2 for reference
+    NALBitReader reader(data, size);
+    // Skip sps_video_parameter_set_id
+    reader.skipBits(4);
+    uint8_t maxSubLayersMinus1 = reader.getBitsWithFallback(3, 0);
+    // Skip sps_temporal_id_nesting_flag
+    reader.skipBits(1);
+    // Skip general profile
+    reader.skipBits(96);
+    if (maxSubLayersMinus1 > 0) {
+        bool subLayerProfilePresentFlag[8];
+        bool subLayerLevelPresentFlag[8];
+        for (int i = 0; i < maxSubLayersMinus1; ++i) {
+            subLayerProfilePresentFlag[i] = reader.getBitsWithFallback(1, 0);
+            subLayerLevelPresentFlag[i] = reader.getBitsWithFallback(1, 0);
+        }
+        // Skip reserved
+        reader.skipBits(2 * (8 - maxSubLayersMinus1));
+        for (int i = 0; i < maxSubLayersMinus1; ++i) {
+            if (subLayerProfilePresentFlag[i]) {
+                // Skip profile
+                reader.skipBits(88);
+            }
+            if (subLayerLevelPresentFlag[i]) {
+                // Skip sub_layer_level_idc[i]
+                reader.skipBits(8);
+            }
+        }
+    }
+    // Skip sps_seq_parameter_set_id
+    skipUE(&reader);
+    uint8_t chromaFormatIdc = parseUEWithFallback(&reader, 0);
+    mParams.add(kChromaFormatIdc, chromaFormatIdc);
+    if (chromaFormatIdc == 3) {
+        // Skip separate_colour_plane_flag
+        reader.skipBits(1);
+    }
+    // Skip pic_width_in_luma_samples
+    skipUE(&reader);
+    // Skip pic_height_in_luma_samples
+    skipUE(&reader);
+    if (reader.getBitsWithFallback(1, 0) /* i.e. conformance_window_flag */) {
+        // Skip conf_win_left_offset
+        skipUE(&reader);
+        // Skip conf_win_right_offset
+        skipUE(&reader);
+        // Skip conf_win_top_offset
+        skipUE(&reader);
+        // Skip conf_win_bottom_offset
+        skipUE(&reader);
+    }
+    mParams.add(kBitDepthLumaMinus8, parseUEWithFallback(&reader, 0));
+    mParams.add(kBitDepthChromaMinus8, parseUEWithFallback(&reader, 0));
+
+    // log2_max_pic_order_cnt_lsb_minus4
+    size_t log2MaxPicOrderCntLsb = parseUEWithFallback(&reader, 0) + (size_t)4;
+    bool spsSubLayerOrderingInfoPresentFlag = reader.getBitsWithFallback(1, 0);
+    for (uint32_t i = spsSubLayerOrderingInfoPresentFlag ? 0 : maxSubLayersMinus1;
+            i <= maxSubLayersMinus1; ++i) {
+        skipUE(&reader); // sps_max_dec_pic_buffering_minus1[i]
+        skipUE(&reader); // sps_max_num_reorder_pics[i]
+        skipUE(&reader); // sps_max_latency_increase_plus1[i]
+    }
+
+    skipUE(&reader); // log2_min_luma_coding_block_size_minus3
+    skipUE(&reader); // log2_diff_max_min_luma_coding_block_size
+    skipUE(&reader); // log2_min_luma_transform_block_size_minus2
+    skipUE(&reader); // log2_diff_max_min_luma_transform_block_size
+    skipUE(&reader); // max_transform_hierarchy_depth_inter
+    skipUE(&reader); // max_transform_hierarchy_depth_intra
+    if (reader.getBitsWithFallback(1, 0)) { // scaling_list_enabled_flag u(1)
+        // scaling_list_data
+        if (reader.getBitsWithFallback(1, 0)) { // sps_scaling_list_data_present_flag
+            for (uint32_t sizeId = 0; sizeId < 4; ++sizeId) {
+                for (uint32_t matrixId = 0; matrixId < 6; matrixId += (sizeId == 3) ? 3 : 1) {
+                    if (!reader.getBitsWithFallback(1, 1)) {
+                        // scaling_list_pred_mode_flag[sizeId][matrixId]
+                        skipUE(&reader); // scaling_list_pred_matrix_id_delta[sizeId][matrixId]
+                    } else {
+                        uint32_t coefNum = std::min(64, (1 << (4 + (sizeId << 1))));
+                        if (sizeId > 1) {
+                            skipSE(&reader); // scaling_list_dc_coef_minus8[sizeId - 2][matrixId]
+                        }
+                        for (uint32_t i = 0; i < coefNum; ++i) {
+                            skipSE(&reader); // scaling_list_delta_coef
+                        }
+                    }
+                }
+            }
+        }
+    }
+    reader.skipBits(1); // amp_enabled_flag
+    reader.skipBits(1); // sample_adaptive_offset_enabled_flag u(1)
+    if (reader.getBitsWithFallback(1, 0)) { // pcm_enabled_flag
+        reader.skipBits(4); // pcm_sample_bit_depth_luma_minus1
+        reader.skipBits(4); // pcm_sample_bit_depth_chroma_minus1 u(4)
+        skipUE(&reader); // log2_min_pcm_luma_coding_block_size_minus3
+        skipUE(&reader); // log2_diff_max_min_pcm_luma_coding_block_size
+        reader.skipBits(1); // pcm_loop_filter_disabled_flag
+    }
+    uint32_t numShortTermRefPicSets = parseUEWithFallback(&reader, 0);
+    uint32_t numPics = 0;
+    for (uint32_t i = 0; i < numShortTermRefPicSets; ++i) {
+        // st_ref_pic_set(i)
+        if (i != 0 && reader.getBitsWithFallback(1, 0)) { // inter_ref_pic_set_prediction_flag
+            reader.skipBits(1); // delta_rps_sign
+            skipUE(&reader); // abs_delta_rps_minus1
+            uint32_t nextNumPics = 0;
+            for (uint32_t j = 0; j <= numPics; ++j) {
+                if (reader.getBitsWithFallback(1, 0) // used_by_curr_pic_flag[j]
+                        || reader.getBitsWithFallback(1, 0)) { // use_delta_flag[j]
+                    ++nextNumPics;
+                }
+            }
+            numPics = nextNumPics;
+        } else {
+            uint32_t numNegativePics = parseUEWithFallback(&reader, 0);
+            uint32_t numPositivePics = parseUEWithFallback(&reader, 0);
+            if (numNegativePics > UINT32_MAX - numPositivePics) {
+                return ERROR_MALFORMED;
+            }
+            numPics = numNegativePics + numPositivePics;
+            for (uint32_t j = 0; j < numPics; ++j) {
+                skipUE(&reader); // delta_poc_s0|1_minus1[i]
+                reader.skipBits(1); // used_by_curr_pic_s0|1_flag[i]
+            }
+        }
+    }
+    if (reader.getBitsWithFallback(1, 0)) { // long_term_ref_pics_present_flag
+        uint32_t numLongTermRefPicSps = parseUEWithFallback(&reader, 0);
+        for (uint32_t i = 0; i < numLongTermRefPicSps; ++i) {
+            reader.skipBits(log2MaxPicOrderCntLsb); // lt_ref_pic_poc_lsb_sps[i]
+            reader.skipBits(1); // used_by_curr_pic_lt_sps_flag[i]
+        }
+    }
+    reader.skipBits(1); // sps_temporal_mvp_enabled_flag
+    reader.skipBits(1); // strong_intra_smoothing_enabled_flag
+    if (reader.getBitsWithFallback(1, 0)) { // vui_parameters_present_flag
+        if (reader.getBitsWithFallback(1, 0)) { // aspect_ratio_info_present_flag
+            uint32_t aspectRatioIdc = reader.getBitsWithFallback(8, 0);
+            if (aspectRatioIdc == 0xFF /* EXTENDED_SAR */) {
+                reader.skipBits(16); // sar_width
+                reader.skipBits(16); // sar_height
+            }
+        }
+        if (reader.getBitsWithFallback(1, 0)) { // overscan_info_present_flag
+            reader.skipBits(1); // overscan_appropriate_flag
+        }
+        if (reader.getBitsWithFallback(1, 0)) { // video_signal_type_present_flag
+            reader.skipBits(3); // video_format
+            uint32_t videoFullRangeFlag;
+            if (reader.getBitsGraceful(1, &videoFullRangeFlag)) {
+                mParams.add(kVideoFullRangeFlag, videoFullRangeFlag);
+            }
+            if (reader.getBitsWithFallback(1, 0)) { // colour_description_present_flag
+                mInfo = (Info)(mInfo | kInfoHasColorDescription);
+                uint32_t colourPrimaries, transferCharacteristics, matrixCoeffs;
+                if (reader.getBitsGraceful(8, &colourPrimaries)) {
+                    mParams.add(kColourPrimaries, colourPrimaries);
+                }
+                if (reader.getBitsGraceful(8, &transferCharacteristics)) {
+                    mParams.add(kTransferCharacteristics, transferCharacteristics);
+                    if (transferCharacteristics == 16 /* ST 2084 */
+                            || transferCharacteristics == 18 /* ARIB STD-B67 HLG */) {
+                        mInfo = (Info)(mInfo | kInfoIsHdr);
+                    }
+                }
+                if (reader.getBitsGraceful(8, &matrixCoeffs)) {
+                    mParams.add(kMatrixCoeffs, matrixCoeffs);
+                }
+            }
+            // skip rest of VUI
+        }
+    }
+
+    return reader.overRead() ? ERROR_MALFORMED : OK;
+}
+
+status_t HevcParameterSets::parsePps(
+        const uint8_t* data __unused, size_t size __unused) {
+    return OK;
+}
+
+status_t HevcParameterSets::makeHvcc(uint8_t *hvcc, size_t *hvccSize,
+        size_t nalSizeLength) {
+    if (hvcc == NULL || hvccSize == NULL
+            || (nalSizeLength != 4 && nalSizeLength != 2)) {
+        return BAD_VALUE;
+    }
+    // ISO 14496-15: HEVC file format
+    size_t size = 23;  // 23 bytes in the header
+    size_t numOfArrays = 0;
+    const size_t numNalUnits = getNumNalUnits();
+    for (size_t i = 0; i < ARRAY_SIZE(kHevcNalUnitTypes); ++i) {
+        uint8_t type = kHevcNalUnitTypes[i];
+        size_t numNalus = getNumNalUnitsOfType(type);
+        if (numNalus == 0) {
+            continue;
+        }
+        ++numOfArrays;
+        size += 3;
+        for (size_t j = 0; j < numNalUnits; ++j) {
+            if (getType(j) != type) {
+                continue;
+            }
+            size += 2 + getSize(j);
+        }
+    }
+    uint8_t generalProfileSpace, generalTierFlag, generalProfileIdc;
+    if (!findParam8(kGeneralProfileSpace, &generalProfileSpace)
+            || !findParam8(kGeneralTierFlag, &generalTierFlag)
+            || !findParam8(kGeneralProfileIdc, &generalProfileIdc)) {
+        return ERROR_MALFORMED;
+    }
+    uint32_t compatibilityFlags;
+    uint64_t constraintIdcFlags;
+    if (!findParam32(kGeneralProfileCompatibilityFlags, &compatibilityFlags)
+            || !findParam64(kGeneralConstraintIndicatorFlags, &constraintIdcFlags)) {
+        return ERROR_MALFORMED;
+    }
+    uint8_t generalLevelIdc;
+    if (!findParam8(kGeneralLevelIdc, &generalLevelIdc)) {
+        return ERROR_MALFORMED;
+    }
+    uint8_t chromaFormatIdc, bitDepthLumaMinus8, bitDepthChromaMinus8;
+    if (!findParam8(kChromaFormatIdc, &chromaFormatIdc)
+            || !findParam8(kBitDepthLumaMinus8, &bitDepthLumaMinus8)
+            || !findParam8(kBitDepthChromaMinus8, &bitDepthChromaMinus8)) {
+        return ERROR_MALFORMED;
+    }
+    if (size > *hvccSize) {
+        return NO_MEMORY;
+    }
+    *hvccSize = size;
+
+    uint8_t *header = hvcc;
+    header[0] = 1;
+    header[1] = (generalProfileSpace << 6) | (generalTierFlag << 5) | generalProfileIdc;
+    header[2] = (compatibilityFlags >> 24) & 0xff;
+    header[3] = (compatibilityFlags >> 16) & 0xff;
+    header[4] = (compatibilityFlags >> 8) & 0xff;
+    header[5] = compatibilityFlags & 0xff;
+    header[6] = (constraintIdcFlags >> 40) & 0xff;
+    header[7] = (constraintIdcFlags >> 32) & 0xff;
+    header[8] = (constraintIdcFlags >> 24) & 0xff;
+    header[9] = (constraintIdcFlags >> 16) & 0xff;
+    header[10] = (constraintIdcFlags >> 8) & 0xff;
+    header[11] = constraintIdcFlags & 0xff;
+    header[12] = generalLevelIdc;
+    // FIXME: parse min_spatial_segmentation_idc.
+    header[13] = 0xf0;
+    header[14] = 0;
+    // FIXME: derive parallelismType properly.
+    header[15] = 0xfc;
+    header[16] = 0xfc | chromaFormatIdc;
+    header[17] = 0xf8 | bitDepthLumaMinus8;
+    header[18] = 0xf8 | bitDepthChromaMinus8;
+    // FIXME: derive avgFrameRate
+    header[19] = 0;
+    header[20] = 0;
+    // constantFrameRate, numTemporalLayers, temporalIdNested all set to 0.
+    header[21] = nalSizeLength - 1;
+    header[22] = numOfArrays;
+    header += 23;
+    for (size_t i = 0; i < ARRAY_SIZE(kHevcNalUnitTypes); ++i) {
+        uint8_t type = kHevcNalUnitTypes[i];
+        size_t numNalus = getNumNalUnitsOfType(type);
+        if (numNalus == 0) {
+            continue;
+        }
+        // array_completeness set to 0.
+        header[0] = type;
+        header[1] = (numNalus >> 8) & 0xff;
+        header[2] = numNalus & 0xff;
+        header += 3;
+        for (size_t j = 0; j < numNalUnits; ++j) {
+            if (getType(j) != type) {
+                continue;
+            }
+            header[0] = (getSize(j) >> 8) & 0xff;
+            header[1] = getSize(j) & 0xff;
+            if (!write(j, header + 2, size - (header - (uint8_t *)hvcc))) {
+                return NO_MEMORY;
+            }
+            header += (2 + getSize(j));
+        }
+    }
+    CHECK_EQ(header - size, hvcc);
+
+    return OK;
+}
+
+}  // namespace android
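
A note on the bit twiddling in addNalUnit(): an HEVC NAL unit begins with a two-byte header
(Rec. ITU-T H.265, clause 7.3.1.2) laid out as forbidden_zero_bit(1), nal_unit_type(6),
nuh_layer_id(6), nuh_temporal_id_plus1(3), which is why the type is (data[0] >> 1) & 0x3f and
the payload parsers receive data + 2. Decoding the full header, for reference:

    #include <cstdint>

    struct HevcNalHeader {
        uint8_t type;        // nal_unit_type: 32 = VPS, 33 = SPS, 34 = PPS
        uint8_t layerId;     // nuh_layer_id
        uint8_t temporalId;  // nuh_temporal_id_plus1 - 1
    };

    static HevcNalHeader parseNalHeader(const uint8_t *data) {
        HevcNalHeader h;
        h.type = (data[0] >> 1) & 0x3f;
        h.layerId = ((data[0] & 0x1) << 5) | ((data[1] >> 3) & 0x1f);
        h.temporalId = (data[1] & 0x7) - 1;
        return h;
    }
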
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 2e54e8c..82e7a26 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -252,6 +252,7 @@
       mDataSource(source),
       mFirstFramePos(-1),
       mFixedHeader(0) {
+
     off64_t pos = 0;
     off64_t post_id3_pos;
     uint32_t header;
@@ -350,7 +351,13 @@
     if (mSeeker == NULL || !mSeeker->getDuration(&durationUs)) {
         off64_t fileSize;
         if (mDataSource->getSize(&fileSize) == OK) {
-            durationUs = 8000LL * (fileSize - mFirstFramePos) / bitrate;
+            off64_t dataLength = fileSize - mFirstFramePos;
+            if (dataLength > INT64_MAX / 8000LL) {
+                // duration would overflow
+                durationUs = INT64_MAX;
+            } else {
+                durationUs = 8000LL * dataLength / bitrate;
+            }
         } else {
             durationUs = -1;
         }
@@ -400,7 +407,7 @@
     return mInitCheck != OK ? 0 : 1;
 }
 
-sp<MediaSource> MP3Extractor::getTrack(size_t index) {
+sp<IMediaSource> MP3Extractor::getTrack(size_t index) {
     if (mInitCheck != OK || index != 0) {
         return NULL;
     }
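
The duration fix above is an instance of a common saturation pattern: before computing
n * value, test value against INT64_MAX / n and pin the result rather than letting the
multiply wrap. As a generic helper (hypothetical; assumes all three inputs are positive):

    #include <cstdint>

    static int64_t saturatingScale(int64_t value, int64_t num, int64_t den) {
        if (value > INT64_MAX / num) {
            return INT64_MAX;  // the multiply would overflow; saturate
        }
        return value * num / den;
    }
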
diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp
index ef07aa0..a9e8846 100644
--- a/media/libstagefright/MPEG2TSWriter.cpp
+++ b/media/libstagefright/MPEG2TSWriter.cpp
@@ -35,7 +35,7 @@
 namespace android {
 
 struct MPEG2TSWriter::SourceInfo : public AHandler {
-    SourceInfo(const sp<MediaSource> &source);
+    SourceInfo(const sp<IMediaSource> &source);
 
     void start(const sp<AMessage> &notify);
     void stop();
@@ -69,7 +69,7 @@
         kWhatRead  = 'read',
     };
 
-    sp<MediaSource> mSource;
+    sp<IMediaSource> mSource;
     sp<ALooper> mLooper;
     sp<AMessage> mNotify;
 
@@ -93,7 +93,7 @@
     DISALLOW_EVIL_CONSTRUCTORS(SourceInfo);
 };
 
-MPEG2TSWriter::SourceInfo::SourceInfo(const sp<MediaSource> &source)
+MPEG2TSWriter::SourceInfo::SourceInfo(const sp<IMediaSource> &source)
     : mSource(source),
       mLooper(new ALooper),
       mEOSReceived(false),
@@ -523,7 +523,7 @@
     }
 }
 
-status_t MPEG2TSWriter::addSource(const sp<MediaSource> &source) {
+status_t MPEG2TSWriter::addSource(const sp<IMediaSource> &source) {
     CHECK(!mStarted);
 
     sp<MetaData> meta = source->getFormat();
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
old mode 100755
new mode 100644
index 4c10cc9..6a67fcf
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -34,6 +34,7 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaDefs.h>
@@ -50,6 +51,11 @@
 
 namespace android {
 
+enum {
+    // max track header chunk to return
+    kMaxTrackHeaderSize = 32,
+};
+
 class MPEG4Source : public MediaSource {
 public:
     // Caller retains ownership of both "dataSource" and "sampleTable".
@@ -475,6 +481,22 @@
                             ((int64_t)sampleTime * 1000000) / track->timescale);
                 }
             }
+
+            // MPEG2 tracks do not provide CSD, so read the stream header
+            if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)) {
+                off64_t offset;
+                size_t size;
+                if (track->sampleTable->getMetaDataForSample(
+                            0 /* sampleIndex */, &offset, &size, NULL /* sampleTime */) == OK) {
+                    if (size > kMaxTrackHeaderSize) {
+                        size = kMaxTrackHeaderSize;
+                    }
+                    uint8_t header[kMaxTrackHeaderSize];
+                    if (mDataSource->readAt(offset, &header, size) == (ssize_t)size) {
+                        track->meta->setData(kKeyStreamHeader, 'mdat', header, size);
+                    }
+                }
+            }
         }
     }
 
@@ -759,17 +781,31 @@
 }
 
 // Given a time in seconds since Jan 1 1904, produce a human-readable string.
-static void convertTimeToDate(int64_t time_1904, String8 *s) {
-    time_t time_1970 = time_1904 - (((66 * 365 + 17) * 24) * 3600);
+static bool convertTimeToDate(int64_t time_1904, String8 *s) {
+    // delta between mpeg4 time and unix epoch time
+    static const int64_t delta = (((66 * 365 + 17) * 24) * 3600);
+    if (time_1904 < INT64_MIN + delta) {
+        return false;
+    }
+    time_t time_1970 = time_1904 - delta;
 
     char tmp[32];
-    strftime(tmp, sizeof(tmp), "%Y%m%dT%H%M%S.000Z", gmtime(&time_1970));
-
-    s->setTo(tmp);
+    struct tm* tm = gmtime(&time_1970);
+    if (tm != NULL &&
+            strftime(tmp, sizeof(tmp), "%Y%m%dT%H%M%S.000Z", tm) > 0) {
+        s->setTo(tmp);
+        return true;
+    }
+    return false;
 }
 
 status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
     ALOGV("entering parseChunk %lld/%d", (long long)*offset, depth);
+
+    if (*offset < 0) {
+        ALOGE("b/23540914");
+        return ERROR_MALFORMED;
+    }
     uint32_t hdr[2];
     if (mDataSource->readAt(*offset, hdr, 8) < 8) {
         return ERROR_IO;
@@ -835,7 +871,12 @@
 
     PathAdder autoAdder(&mPath, chunk_type);
 
-    off64_t chunk_data_size = *offset + chunk_size - data_offset;
+    // (data_offset - *offset) is either 8 or 16
+    off64_t chunk_data_size = chunk_size - (data_offset - *offset);
+    if (chunk_data_size < 0) {
+        ALOGE("b/23540914");
+        return ERROR_MALFORMED;
+    }
 
     if (chunk_type != FOURCC('c', 'p', 'r', 't')
             && chunk_type != FOURCC('c', 'o', 'v', 'r')
@@ -1031,7 +1072,7 @@
                     int64_t delay = (media_time  * samplerate + 500000) / 1000000;
                     mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
 
-                    int64_t paddingus = duration - (segment_duration + media_time);
+                    int64_t paddingus = duration - (int64_t)(segment_duration + media_time);
                     if (paddingus < 0) {
                         // track duration from media header (which is what kKeyDuration is) might
                         // be slightly shorter than the segment duration, which would make the
@@ -1525,8 +1566,9 @@
 
                 const char *mime;
                 CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
-                if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
-                    // AVC requires compression ratio of at least 2, and uses
+                if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+                        || !strcmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+                    // AVC & HEVC require a compression ratio of at least 2, and use
                     // macroblocks
                     max_size = ((width + 15) / 16) * ((height + 15) / 16) * 192;
                 } else {
@@ -1544,12 +1586,29 @@
             // Calculate average frame rate.
             if (!strncasecmp("video/", mime, 6)) {
                 size_t nSamples = mLastTrack->sampleTable->countSamples();
-                int64_t durationUs;
-                if (mLastTrack->meta->findInt64(kKeyDuration, &durationUs)) {
-                    if (durationUs > 0) {
-                        int32_t frameRate = (nSamples * 1000000LL +
-                                    (durationUs >> 1)) / durationUs;
-                        mLastTrack->meta->setInt32(kKeyFrameRate, frameRate);
+                if (nSamples == 0) {
+                    int32_t trackId;
+                    if (mLastTrack->meta->findInt32(kKeyTrackID, &trackId)) {
+                        for (size_t i = 0; i < mTrex.size(); i++) {
+                            Trex *t = &mTrex.editItemAt(i);
+                            if (t->track_ID == (uint32_t) trackId) {
+                                if (t->default_sample_duration > 0) {
+                                    int32_t frameRate =
+                                            mLastTrack->timescale / t->default_sample_duration;
+                                    mLastTrack->meta->setInt32(kKeyFrameRate, frameRate);
+                                }
+                                break;
+                            }
+                        }
+                    }
+                } else {
+                    int64_t durationUs;
+                    if (mLastTrack->meta->findInt64(kKeyDuration, &durationUs)) {
+                        if (durationUs > 0) {
+                            int32_t frameRate = (nSamples * 1000000LL +
+                                        (durationUs >> 1)) / durationUs;
+                            mLastTrack->meta->setInt32(kKeyFrameRate, frameRate);
+                        }
                     }
                 }
             }
@@ -1705,6 +1764,31 @@
             break;
         }
 
+        case FOURCC('b', 't', 'r', 't'):
+        {
+            *offset += chunk_size;
+
+            uint8_t buffer[12];
+            if (chunk_data_size != sizeof(buffer)) {
+                return ERROR_MALFORMED;
+            }
+
+            if (mDataSource->readAt(
+                    data_offset, buffer, chunk_data_size) < chunk_data_size) {
+                return ERROR_IO;
+            }
+
+            uint32_t maxBitrate = U32_AT(&buffer[4]);
+            uint32_t avgBitrate = U32_AT(&buffer[8]);
+            if (maxBitrate > 0 && maxBitrate < INT32_MAX) {
+                mLastTrack->meta->setInt32(kKeyMaxBitRate, (int32_t)maxBitrate);
+            }
+            if (avgBitrate > 0 && avgBitrate < INT32_MAX) {
+                mLastTrack->meta->setInt32(kKeyBitRate, (int32_t)avgBitrate);
+            }
+            break;
+        }
+
         case FOURCC('a', 'v', 'c', 'C'):
         {
             *offset += chunk_size;
@@ -1883,14 +1967,15 @@
                 }
                 duration = d32;
             }
-            if (duration != 0 && mHeaderTimescale != 0) {
+            if (duration != 0 && mHeaderTimescale != 0 && duration < UINT64_MAX / 1000000) {
                 mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
             }
 
             String8 s;
-            convertTimeToDate(creationTime, &s);
+            if (convertTimeToDate(creationTime, &s)) {
+                mFileMetaData->setCString(kKeyDate, s.string());
+            }
 
-            mFileMetaData->setCString(kKeyDate, s.string());
 
             break;
         }
@@ -2091,6 +2176,21 @@
             break;
         }
 
+        case FOURCC('c', 'o', 'l', 'r'):
+        {
+            *offset += chunk_size;
+            // this must be in a VisualSampleEntry box under the Sample Description Box ('stsd')
+            // ignore otherwise
+            if (depth >= 2 && mPath[depth - 2] == FOURCC('s', 't', 's', 'd')) {
+                status_t err = parseColorInfo(data_offset, chunk_data_size);
+                if (err != OK) {
+                    return err;
+                }
+            }
+
+            break;
+        }
+
         case FOURCC('t', 'i', 't', 'l'):
         case FOURCC('p', 'e', 'r', 'f'):
         case FOURCC('a', 'u', 't', 'h'):
@@ -2671,6 +2771,49 @@
     return OK;
 }
 
+status_t MPEG4Extractor::parseColorInfo(off64_t offset, size_t size) {
+    if (size < 4 || size == SIZE_MAX || mLastTrack == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
+    if (buffer == NULL) {
+        return ERROR_MALFORMED;
+    }
+    if (mDataSource->readAt(offset, buffer, size) != (ssize_t)size) {
+        delete[] buffer;
+        buffer = NULL;
+
+        return ERROR_IO;
+    }
+
+    int32_t type = U32_AT(&buffer[0]);
+    if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
+            || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
+        int32_t primaries = U16_AT(&buffer[4]);
+        int32_t transfer = U16_AT(&buffer[6]);
+        int32_t coeffs = U16_AT(&buffer[8]);
+        bool fullRange = (type == FOURCC('n', 'c', 'l', 'x')) && (buffer[10] & 128);
+
+        ColorAspects aspects;
+        ColorUtils::convertIsoColorAspectsToCodecAspects(
+                primaries, transfer, coeffs, fullRange, aspects);
+
+        // only store the first color specification
+        if (!mLastTrack->meta->hasData(kKeyColorPrimaries)) {
+            mLastTrack->meta->setInt32(kKeyColorPrimaries, aspects.mPrimaries);
+            mLastTrack->meta->setInt32(kKeyTransferFunction, aspects.mTransfer);
+            mLastTrack->meta->setInt32(kKeyColorMatrix, aspects.mMatrixCoeffs);
+            mLastTrack->meta->setInt32(kKeyColorRange, aspects.mRange);
+        }
+    }
+
+    delete[] buffer;
+    buffer = NULL;
+
+    return OK;
+}
+
 status_t MPEG4Extractor::parse3GPPMetaData(off64_t offset, size_t size, int depth) {
     if (size < 4 || size == SIZE_MAX) {
         return ERROR_MALFORMED;
@@ -2843,7 +2986,7 @@
     }
 }
 
-sp<MediaSource> MPEG4Extractor::getTrack(size_t index) {
+sp<IMediaSource> MPEG4Extractor::getTrack(size_t index) {
     status_t err;
     if ((err = readMetaData()) != OK) {
         return NULL;
@@ -2868,7 +3011,7 @@
     int32_t trackId;
     if (track->meta->findInt32(kKeyTrackID, &trackId)) {
         for (size_t i = 0; i < mTrex.size(); i++) {
-            Trex *t = &mTrex.editItemAt(index);
+            Trex *t = &mTrex.editItemAt(i);
             if (t->track_ID == (uint32_t) trackId) {
                 trex = t;
                 break;
@@ -2881,6 +3024,39 @@
 
     ALOGV("getTrack called, pssh: %zu", mPssh.size());
 
+    const char *mime;
+    if (!track->meta->findCString(kKeyMIMEType, &mime)) {
+        return NULL;
+    }
+
+    if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+        uint32_t type;
+        const void *data;
+        size_t size;
+        if (!track->meta->findData(kKeyAVCC, &type, &data, &size)) {
+            return NULL;
+        }
+
+        const uint8_t *ptr = (const uint8_t *)data;
+
+        if (size < 7 || ptr[0] != 1) {  // configurationVersion == 1
+            return NULL;
+        }
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+        uint32_t type;
+        const void *data;
+        size_t size;
+        if (!track->meta->findData(kKeyHVCC, &type, &data, &size)) {
+            return NULL;
+        }
+
+        const uint8_t *ptr = (const uint8_t *)data;
+
+        if (size < 22 || ptr[0] != 1) {  // configurationVersion == 1
+            return NULL;
+        }
+    }
+
     return new MPEG4Source(this,
             track->meta, mDataSource, track->timescale, track->sampleTable,
             mSidxEntries, trex, mMoofOffset);
@@ -3336,7 +3512,7 @@
 
         const uint8_t *ptr = (const uint8_t *)data;
 
-        CHECK(size >= 7);
+        CHECK(size >= 22);
         CHECK_EQ((unsigned)ptr[0], 1u);  // configurationVersion == 1
 
         mNALLengthSize = 1 + (ptr[14 + 7] & 3);
@@ -4732,12 +4908,18 @@
                 // The smallest valid chunk is 16 bytes long in this case.
                 return false;
             }
+
         } else if (chunkSize < 8) {
             // The smallest valid chunk is 8 bytes long.
             return false;
         }
 
-        off64_t chunkDataSize = offset + chunkSize - chunkDataOffset;
+        // (data_offset - offset) is either 8 or 16
+        off64_t chunkDataSize = chunkSize - (chunkDataOffset - offset);
+        if (chunkDataSize < 0) {
+            ALOGE("b/23540914");
+            return ERROR_MALFORMED;
+        }
 
         char chunkstring[5];
         MakeFourCCString(chunkType, chunkstring);
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 47f114a..24fb987 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -30,6 +30,7 @@
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/MPEG4Writer.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MetaData.h>
@@ -41,7 +42,7 @@
 #include <cutils/properties.h>
 
 #include "include/ESDS.h"
-
+#include "include/HevcUtils.h"
 
 #ifndef __predict_false
 #define __predict_false(exp) __builtin_expect((exp) != 0, 0)
@@ -70,12 +71,24 @@
 #endif
 static const char kMetaKey_CaptureFps[] = "com.android.capture.fps";
 
+static const uint8_t kMandatoryHevcNalUnitTypes[3] = {
+    kHevcNalUnitTypeVps,
+    kHevcNalUnitTypeSps,
+    kHevcNalUnitTypePps,
+};
+static const uint8_t kHevcNalUnitTypes[5] = {
+    kHevcNalUnitTypeVps,
+    kHevcNalUnitTypeSps,
+    kHevcNalUnitTypePps,
+    kHevcNalUnitTypePrefixSei,
+    kHevcNalUnitTypeSuffixSei,
+};
 /* uncomment to include model and build in meta */
 //#define SHOW_MODEL_BUILD 1
 
 class MPEG4Writer::Track {
 public:
-    Track(MPEG4Writer *owner, const sp<MediaSource> &source, size_t trackId);
+    Track(MPEG4Writer *owner, const sp<IMediaSource> &source, size_t trackId);
 
     ~Track();
 
@@ -89,6 +102,7 @@
     void writeTrackHeader(bool use32BitOffset = true);
     void bufferChunk(int64_t timestampUs);
     bool isAvc() const { return mIsAvc; }
+    bool isHevc() const { return mIsHevc; }
     bool isAudio() const { return mIsAudio; }
     bool isMPEG4() const { return mIsMPEG4; }
     void addChunkOffset(off64_t offset);
@@ -228,12 +242,13 @@
 
     MPEG4Writer *mOwner;
     sp<MetaData> mMeta;
-    sp<MediaSource> mSource;
+    sp<IMediaSource> mSource;
     volatile bool mDone;
     volatile bool mPaused;
     volatile bool mResumed;
     volatile bool mStarted;
     bool mIsAvc;
+    bool mIsHevc;
     bool mIsAudio;
     bool mIsMPEG4;
     int32_t mTrackId;
@@ -299,10 +314,17 @@
     const uint8_t *parseParamSet(
         const uint8_t *data, size_t length, int type, size_t *paramSetLen);
 
+    status_t copyCodecSpecificData(const uint8_t *data, size_t size, size_t minLength = 0);
+
     status_t makeAVCCodecSpecificData(const uint8_t *data, size_t size);
     status_t copyAVCCodecSpecificData(const uint8_t *data, size_t size);
     status_t parseAVCCodecSpecificData(const uint8_t *data, size_t size);
 
+    status_t makeHEVCCodecSpecificData(const uint8_t *data, size_t size);
+    status_t copyHEVCCodecSpecificData(const uint8_t *data, size_t size);
+    status_t parseHEVCCodecSpecificData(
+            const uint8_t *data, size_t size, HevcParameterSets &paramSets);
+
     // Track authoring progress status
     void trackProgressStatus(int64_t timeUs, status_t err = OK);
     void initTrackingProgressStatus(MetaData *params);
@@ -340,6 +362,7 @@
     void writeD263Box();
     void writePaspBox();
     void writeAvccBox();
+    void writeHvccBox();
     void writeUrlBox();
     void writeDrefBox();
     void writeDinfBox();
@@ -349,6 +372,7 @@
     void writeVmhdBox();
     void writeHdlrBox();
     void writeTkhdBox(uint32_t now);
+    void writeColrBox();
     void writeMp4aEsdsBox();
     void writeMp4vEsdsBox();
     void writeAudioFourCCBox();
@@ -463,6 +487,8 @@
             return "s263";
         } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
             return "avc1";
+        } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
+            return "hvc1";
         }
     } else {
         ALOGE("Track (%s) other than video or audio is not supported", mime);
@@ -470,7 +496,7 @@
     return NULL;
 }
 
-status_t MPEG4Writer::addSource(const sp<MediaSource> &source) {
+status_t MPEG4Writer::addSource(const sp<IMediaSource> &source) {
     Mutex::Autolock l(mLock);
     if (mStarted) {
         ALOGE("Attempt to add source AFTER recording is started");
@@ -999,7 +1025,11 @@
     // MP4 files count time in seconds since midnight, Jan. 1, 1904,
     // while the time function returns Unix epoch values, which start
     // at 1970-01-01. Let's add the number of seconds between them.
-    uint32_t mpeg4Time = now + (66 * 365 + 17) * (24 * 60 * 60);
+    static const uint32_t delta = (66 * 365 + 17) * (24 * 60 * 60);
+    if (now < 0 || uint32_t(now) > UINT32_MAX - delta) {
+        return 0;
+    }
+    uint32_t mpeg4Time = uint32_t(now) + delta;
     return mpeg4Time;
 }
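
The guarded delta is the distance between the MP4 epoch (1904-01-01) and the Unix epoch (1970-01-01): 66 years, 17 of them leap years. A quick standalone arithmetic check of the constant and of what the overflow guard implies:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t delta = (66u * 365u + 17u) * (24u * 60u * 60u);
        assert(delta == 2082844800u);  // seconds from 1904-01-01 to 1970-01-01
        // The guard above rejects any time_t beyond UINT32_MAX - delta,
        // i.e. conversions stay representable until roughly the year 2040.
        assert(UINT32_MAX - delta == 2212122495u);
        return 0;
    }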
 
@@ -1432,7 +1462,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 MPEG4Writer::Track::Track(
-        MPEG4Writer *owner, const sp<MediaSource> &source, size_t trackId)
+        MPEG4Writer *owner, const sp<IMediaSource> &source, size_t trackId)
     : mOwner(owner),
       mMeta(source->getFormat()),
       mSource(source),
@@ -1461,6 +1491,7 @@
     const char *mime;
     mMeta->findCString(kKeyMIMEType, &mime);
     mIsAvc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
+    mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
     mIsAudio = !strncasecmp(mime, "audio/", 6);
     mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
                !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
@@ -1556,31 +1587,26 @@
     const char *mime;
     CHECK(mMeta->findCString(kKeyMIMEType, &mime));
 
+    uint32_t type;
+    const void *data = NULL;
+    size_t size = 0;
     if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
-        uint32_t type;
-        const void *data;
-        size_t size;
-        if (mMeta->findData(kKeyAVCC, &type, &data, &size)) {
-            mCodecSpecificData = malloc(size);
-            mCodecSpecificDataSize = size;
-            memcpy(mCodecSpecificData, data, size);
-            mGotAllCodecSpecificData = true;
-        }
+        mMeta->findData(kKeyAVCC, &type, &data, &size);
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+        mMeta->findData(kKeyHVCC, &type, &data, &size);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)
             || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
-        uint32_t type;
-        const void *data;
-        size_t size;
         if (mMeta->findData(kKeyESDS, &type, &data, &size)) {
             ESDS esds(data, size);
-            if (esds.getCodecSpecificInfo(&data, &size) == OK) {
-                mCodecSpecificData = malloc(size);
-                mCodecSpecificDataSize = size;
-                memcpy(mCodecSpecificData, data, size);
-                mGotAllCodecSpecificData = true;
+            if (esds.getCodecSpecificInfo(&data, &size) != OK) {
+                data = NULL;
+                size = 0;
             }
         }
     }
+    if (data != NULL && copyCodecSpecificData((uint8_t *)data, size) == OK) {
+        mGotAllCodecSpecificData = true;
+    }
 }
 
 MPEG4Writer::Track::~Track() {
@@ -1657,7 +1683,7 @@
     while (!chunk->mSamples.empty()) {
         List<MediaBuffer *>::iterator it = chunk->mSamples.begin();
 
-        off64_t offset = chunk->mTrack->isAvc()
+        off64_t offset = (chunk->mTrack->isAvc() || chunk->mTrack->isHevc())
                                 ? addLengthPrefixedSample_l(*it)
                                 : addSample_l(*it);
 
@@ -1903,22 +1929,6 @@
     *type = (byte & 0x1F);
 }
 
-static const uint8_t *findNextStartCode(
-        const uint8_t *data, size_t length) {
-
-    ALOGV("findNextStartCode: %p %zu", data, length);
-
-    size_t bytesLeft = length;
-    while (bytesLeft > 4  &&
-            memcmp("\x00\x00\x00\x01", &data[length - bytesLeft], 4)) {
-        --bytesLeft;
-    }
-    if (bytesLeft <= 4) {
-        bytesLeft = 0; // Last parameter set
-    }
-    return &data[length - bytesLeft];
-}
-
 const uint8_t *MPEG4Writer::Track::parseParamSet(
         const uint8_t *data, size_t length, int type, size_t *paramSetLen) {
 
@@ -1926,7 +1936,7 @@
     CHECK(type == kNalUnitTypeSeqParamSet ||
           type == kNalUnitTypePicParamSet);
 
-    const uint8_t *nextStartCode = findNextStartCode(data, length);
+    const uint8_t *nextStartCode = findNextNalStartCode(data, length);
     *paramSetLen = nextStartCode - data;
     if (*paramSetLen == 0) {
         ALOGE("Param set is malformed, since its length is 0");
@@ -1947,6 +1957,7 @@
             if (mProfileIdc != data[1] ||
                 mProfileCompatible != data[2] ||
                 mLevelIdc != data[3]) {
+                // COULD DO: set profile/level to the lowest required to support all SPSs
                 ALOGE("Inconsistent profile/level found in seq parameter sets");
                 return NULL;
             }
@@ -1964,13 +1975,30 @@
 
     // 2 bytes for each of the parameter set length fields,
     // plus the 7 bytes for the header
-    if (size < 4 + 7) {
+    return copyCodecSpecificData(data, size, 4 + 7);
+}
+
+status_t MPEG4Writer::Track::copyHEVCCodecSpecificData(
+        const uint8_t *data, size_t size) {
+    ALOGV("copyHEVCCodecSpecificData");
+
+    // Min length of HEVC CSD is 23. (ISO/IEC 14496-15:2014 Chapter 8.3.3.1.2)
+    return copyCodecSpecificData(data, size, 23);
+}
+
+status_t MPEG4Writer::Track::copyCodecSpecificData(
+        const uint8_t *data, size_t size, size_t minLength) {
+    if (size < minLength) {
         ALOGE("Codec specific data length too short: %zu", size);
         return ERROR_MALFORMED;
     }
 
-    mCodecSpecificDataSize = size;
     mCodecSpecificData = malloc(size);
+    if (mCodecSpecificData == NULL) {
+        ALOGE("Failed allocating codec specific data");
+        return NO_MEMORY;
+    }
+    mCodecSpecificDataSize = size;
     memcpy(mCodecSpecificData, data, size);
     return OK;
 }
@@ -2093,6 +2121,11 @@
     // ISO 14496-15: AVC file format
     mCodecSpecificDataSize += 7;  // 7 more bytes in the header
     mCodecSpecificData = malloc(mCodecSpecificDataSize);
+    if (mCodecSpecificData == NULL) {
+        mCodecSpecificDataSize = 0;
+        ALOGE("Failed allocating codec specific data");
+        return NO_MEMORY;
+    }
     uint8_t *header = (uint8_t *)mCodecSpecificData;
     header[0] = 1;                     // version
     header[1] = mProfileIdc;           // profile indication
@@ -2141,6 +2174,95 @@
     return OK;
 }
 
+
+status_t MPEG4Writer::Track::parseHEVCCodecSpecificData(
+        const uint8_t *data, size_t size, HevcParameterSets &paramSets) {
+
+    ALOGV("parseHEVCCodecSpecificData");
+    const uint8_t *tmp = data;
+    const uint8_t *nextStartCode = data;
+    size_t bytesLeft = size;
+    while (bytesLeft > 4 && !memcmp("\x00\x00\x00\x01", tmp, 4)) {
+        nextStartCode = findNextNalStartCode(tmp + 4, bytesLeft - 4);
+        status_t err = paramSets.addNalUnit(tmp + 4, (nextStartCode - tmp) - 4);
+        if (err != OK) {
+            return ERROR_MALFORMED;
+        }
+
+        // Move on to find the next parameter set
+        bytesLeft -= nextStartCode - tmp;
+        tmp = nextStartCode;
+    }
+
+    size_t csdSize = 23;
+    const size_t numNalUnits = paramSets.getNumNalUnits();
+    for (size_t i = 0; i < ARRAY_SIZE(kMandatoryHevcNalUnitTypes); ++i) {
+        int type = kMandatoryHevcNalUnitTypes[i];
+        size_t numParamSets = paramSets.getNumNalUnitsOfType(type);
+        if (numParamSets == 0) {
+            ALOGE("Cound not find NAL unit of type %d", type);
+            return ERROR_MALFORMED;
+        }
+    }
+    for (size_t i = 0; i < ARRAY_SIZE(kHevcNalUnitTypes); ++i) {
+        int type = kHevcNalUnitTypes[i];
+        size_t numParamSets = paramSets.getNumNalUnitsOfType(type);
+        if (numParamSets > 0xffff) {
+            ALOGE("Too many seq parameter sets (%zu) found", numParamSets);
+            return ERROR_MALFORMED;
+        }
+        csdSize += 3;
+        for (size_t j = 0; j < numNalUnits; ++j) {
+            if (paramSets.getType(j) != type) {
+                continue;
+            }
+            csdSize += 2 + paramSets.getSize(j);
+        }
+    }
+    mCodecSpecificDataSize = csdSize;
+    return OK;
+}
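
The csdSize accounting above mirrors the HEVCDecoderConfigurationRecord layout: a fixed 23-byte header, a 3-byte array header (type byte plus 2-byte count) for each of the five NAL-unit types, then 2 length bytes plus the payload for every parameter set. A worked example with assumed, purely hypothetical parameter-set sizes:

    #include <cstddef>

    int main() {
        // Hypothetical parameter-set sizes: one VPS, one SPS, one PPS, no SEI.
        size_t vps = 24, sps = 45, pps = 7;
        size_t csdSize = 23          // fixed hvcC header
                + 5 * 3              // 3-byte array header per NAL-unit type
                + (2 + vps)          // per NAL unit: 2-byte length + payload
                + (2 + sps)
                + (2 + pps);
        return csdSize == 120 ? 0 : 1;  // 23 + 15 + 26 + 47 + 9
    }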
+
+status_t MPEG4Writer::Track::makeHEVCCodecSpecificData(
+        const uint8_t *data, size_t size) {
+
+    if (mCodecSpecificData != NULL) {
+        ALOGE("Already have codec specific data");
+        return ERROR_MALFORMED;
+    }
+
+    if (size < 4) {
+        ALOGE("Codec specific data length too short: %zu", size);
+        return ERROR_MALFORMED;
+    }
+
+    // No start code, so the data is already a complete HVCC atom; copy it as-is.
+    if (memcmp("\x00\x00\x00\x01", data, 4)) {
+        return copyHEVCCodecSpecificData(data, size);
+    }
+
+    HevcParameterSets paramSets;
+    if (parseHEVCCodecSpecificData(data, size, paramSets) != OK) {
+        ALOGE("failed parsing codec specific data");
+        return ERROR_MALFORMED;
+    }
+
+    mCodecSpecificData = malloc(mCodecSpecificDataSize);
+    if (mCodecSpecificData == NULL) {
+        mCodecSpecificDataSize = 0;
+        ALOGE("Failed allocating codec specific data");
+        return NO_MEMORY;
+    }
+    status_t err = paramSets.makeHvcc((uint8_t *)mCodecSpecificData,
+            &mCodecSpecificDataSize, mOwner->useNalLengthFour() ? 4 : 2);
+    if (err != OK) {
+        ALOGE("failed constructing HVCC atom");
+        return err;
+    }
+
+    return OK;
+}
+
 /*
  * Updates the drift time from the audio track so that
  * the video track can get the updated drift time information
@@ -2164,6 +2286,7 @@
     const bool hasMultipleTracks = (mOwner->numTracks() > 1);
     int64_t chunkTimestampUs = 0;
     int32_t nChunks = 0;
+    int32_t nActualFrames = 0;        // frames containing non-CSD data (non-0 length)
     int32_t nZeroLengthFrames = 0;
     int64_t lastTimestampUs = 0;      // Previous sample time stamp
     int64_t lastDurationUs = 0;       // Between the previous two samples
@@ -2216,21 +2339,31 @@
         int32_t isCodecConfig;
         if (buffer->meta_data()->findInt32(kKeyIsCodecConfig, &isCodecConfig)
                 && isCodecConfig) {
-            CHECK(!mGotAllCodecSpecificData);
+            // if config format (at track addition) already had CSD, keep that
+            // UNLESS we have not received any frames yet.
+            // TODO: for now the entire CSD has to come in one frame for encoders, even though
+            // they need to be spread out for decoders.
+            if (mGotAllCodecSpecificData && nActualFrames > 0) {
+                ALOGI("ignoring additional CSD for video track after first frame");
+            } else {
+                mMeta = mSource->getFormat(); // get output format after format change
 
-            if (mIsAvc) {
-                status_t err = makeAVCCodecSpecificData(
-                        (const uint8_t *)buffer->data()
-                            + buffer->range_offset(),
-                        buffer->range_length());
-                CHECK_EQ((status_t)OK, err);
-            } else if (mIsMPEG4) {
-                mCodecSpecificDataSize = buffer->range_length();
-                mCodecSpecificData = malloc(mCodecSpecificDataSize);
-                memcpy(mCodecSpecificData,
-                        (const uint8_t *)buffer->data()
-                            + buffer->range_offset(),
-                       buffer->range_length());
+                if (mIsAvc) {
+                    status_t err = makeAVCCodecSpecificData(
+                            (const uint8_t *)buffer->data()
+                                + buffer->range_offset(),
+                            buffer->range_length());
+                    CHECK_EQ((status_t)OK, err);
+                } else if (mIsHevc) {
+                    status_t err = makeHEVCCodecSpecificData(
+                            (const uint8_t *)buffer->data()
+                                + buffer->range_offset(),
+                            buffer->range_length());
+                    CHECK_EQ((status_t)OK, err);
+                } else if (mIsMPEG4) {
+                    copyCodecSpecificData((const uint8_t *)buffer->data() + buffer->range_offset(),
+                            buffer->range_length());
+                }
             }
 
             buffer->release();
@@ -2240,6 +2373,8 @@
             continue;
         }
 
+        ++nActualFrames;
+
         // Make a deep copy of the MediaBuffer and Metadata and release
         // the original as soon as we can
         MediaBuffer *copy = new MediaBuffer(buffer->range_length());
@@ -2250,10 +2385,10 @@
         buffer->release();
         buffer = NULL;
 
-        if (mIsAvc) StripStartcode(copy);
+        if (mIsAvc || mIsHevc) StripStartcode(copy);
 
         size_t sampleSize = copy->range_length();
-        if (mIsAvc) {
+        if (mIsAvc || mIsHevc) {
             if (mOwner->useNalLengthFour()) {
                 sampleSize += 4;
             } else {
@@ -2266,10 +2401,14 @@
         updateTrackSizeEstimate();
 
         if (mOwner->exceedsFileSizeLimit()) {
+            ALOGW("Recorded file size exceeds limit %" PRId64 "bytes",
+                    mOwner->mMaxFileSizeLimitBytes);
             mOwner->notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
             break;
         }
         if (mOwner->exceedsFileDurationLimit()) {
+            ALOGW("Recorded file duration exceeds limit %" PRId64 "microseconds",
+                    mOwner->mMaxFileDurationLimitUs);
             mOwner->notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
             break;
         }
@@ -2395,9 +2534,10 @@
             ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
                 (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
         if (currDurationTicks < 0ll) {
-            ALOGE("timestampUs %" PRId64 " < lastTimestampUs %" PRId64 " for %s track",
-                timestampUs, lastTimestampUs, trackName);
+            ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
+                    (long long)timestampUs, (long long)lastTimestampUs, trackName);
             copy->release();
+            mSource->stop();
             return UNKNOWN_ERROR;
         }
 
@@ -2453,7 +2593,7 @@
             trackProgressStatus(timestampUs);
         }
         if (!hasMultipleTracks) {
-            off64_t offset = mIsAvc? mOwner->addLengthPrefixedSample_l(copy)
+            off64_t offset = (mIsAvc || mIsHevc) ? mOwner->addLengthPrefixedSample_l(copy)
                                  : mOwner->addSample_l(copy);
 
             uint32_t count = (mOwner->use32BitFileOffset()
@@ -2705,7 +2845,8 @@
     CHECK(mMeta->findCString(kKeyMIMEType, &mime));
     if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime) ||
         !strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime) ||
-        !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
+        !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime) ||
+        !strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
         if (!mCodecSpecificData ||
             mCodecSpecificDataSize <= 0) {
             ALOGE("Missing codec specific data");
@@ -2811,12 +2952,37 @@
         writeD263Box();
     } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
         writeAvccBox();
+    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
+        writeHvccBox();
     }
 
     writePaspBox();
+    writeColrBox();
     mOwner->endBox();  // mp4v, s263 or avc1
 }
 
+void MPEG4Writer::Track::writeColrBox() {
+    ColorAspects aspects;
+    memset(&aspects, 0, sizeof(aspects));
+    // TRICKY: using | instead of || so that all four findInt32 calls execute
+    if (mMeta->findInt32(kKeyColorPrimaries, (int32_t*)&aspects.mPrimaries)
+            | mMeta->findInt32(kKeyTransferFunction, (int32_t*)&aspects.mTransfer)
+            | mMeta->findInt32(kKeyColorMatrix, (int32_t*)&aspects.mMatrixCoeffs)
+            | mMeta->findInt32(kKeyColorRange, (int32_t*)&aspects.mRange)) {
+        int32_t primaries, transfer, coeffs;
+        bool fullRange;
+        ColorUtils::convertCodecColorAspectsToIsoAspects(
+                aspects, &primaries, &transfer, &coeffs, &fullRange);
+        mOwner->beginBox("colr");
+        mOwner->writeFourcc("nclx");
+        mOwner->writeInt16(primaries);
+        mOwner->writeInt16(transfer);
+        mOwner->writeInt16(coeffs);
+        mOwner->writeInt8(fullRange ? 128 : 0);
+        mOwner->endBox(); // colr
+    }
+}
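
The bitwise-or in the condition above is deliberate: with ||, a successful findInt32 for kKeyColorPrimaries would short-circuit the remaining lookups and leave the other aspects at their zeroed defaults. A minimal demonstration of the difference, using a stand-in lookup function rather than MetaData:

    #include <cstdio>

    static bool find(int key, int *out) { *out = key; return true; }  // stand-in

    int main() {
        int a = 0, b = 0;
        bool hit1 = find(1, &a) || find(2, &b);  // short-circuits: b stays 0
        int c = 0, d = 0;
        bool hit2 = find(1, &c) | find(2, &d);   // both calls run: d == 2
        printf("%d %d | %d %d\n", a, b, c, d);   // prints "1 0 | 1 2"
        return hit1 && hit2 ? 0 : 1;
    }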
+
 void MPEG4Writer::Track::writeAudioFourCCBox() {
     const char *mime;
     bool success = mMeta->findCString(kKeyMIMEType, &mime);
@@ -2873,11 +3039,14 @@
     mOwner->writeInt8(0x15);   // streamType AudioStream
 
     mOwner->writeInt16(0x03);  // XXX
-    mOwner->writeInt8(0x00);   // buffer size 24-bit
-    int32_t bitRate;
-    bool success = mMeta->findInt32(kKeyBitRate, &bitRate);
-    mOwner->writeInt32(success ? bitRate : 96000); // max bit rate
-    mOwner->writeInt32(success ? bitRate : 96000); // avg bit rate
+    mOwner->writeInt8(0x00);   // buffer size 24-bit (0x300)
+
+    int32_t avgBitrate = 0;
+    (void)mMeta->findInt32(kKeyBitRate, &avgBitrate);
+    int32_t maxBitrate = 0;
+    (void)mMeta->findInt32(kKeyMaxBitRate, &maxBitrate);
+    mOwner->writeInt32(maxBitrate);
+    mOwner->writeInt32(avgBitrate);
 
     mOwner->writeInt8(0x05);   // DecoderSpecificInfoTag
     mOwner->writeInt8(mCodecSpecificDataSize);
@@ -2911,12 +3080,17 @@
     mOwner->writeInt8(0x11);  // streamType VisualStream
 
     static const uint8_t kData[] = {
-        0x01, 0x77, 0x00,
-        0x00, 0x03, 0xe8, 0x00,
-        0x00, 0x03, 0xe8, 0x00
+        0x01, 0x77, 0x00, // buffer size 96000 bytes
     };
     mOwner->write(kData, sizeof(kData));
 
+    int32_t avgBitrate = 0;
+    (void)mMeta->findInt32(kKeyBitRate, &avgBitrate);
+    int32_t maxBitrate = 0;
+    (void)mMeta->findInt32(kKeyMaxBitRate, &maxBitrate);
+    mOwner->writeInt32(maxBitrate);
+    mOwner->writeInt32(avgBitrate);
+
     mOwner->writeInt8(0x05);  // DecoderSpecificInfoTag
 
     mOwner->writeInt8(mCodecSpecificDataSize);
@@ -3066,6 +3240,20 @@
     mOwner->endBox();  // avcC
 }
 
+
+void MPEG4Writer::Track::writeHvccBox() {
+    CHECK(mCodecSpecificData);
+    CHECK_GE(mCodecSpecificDataSize, 23);  // minimum valid hvcC record; ptr[21] is read below
+
+    // Patch the hvcC atom's lengthSizeMinusOne field to match the number
+    // of bytes we use to indicate the size of a NAL unit.
+    uint8_t *ptr = (uint8_t *)mCodecSpecificData;
+    ptr[21] = (ptr[21] & 0xfc) | (mOwner->useNalLengthFour() ? 3 : 1);
+    mOwner->beginBox("hvcC");
+    mOwner->write(mCodecSpecificData, mCodecSpecificDataSize);
+    mOwner->endBox();  // hvcC
+}
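
Byte 21 of the hvcC record carries lengthSizeMinusOne in its low two bits; the patching above forces it to 3 (4-byte NAL lengths) or 1 (2-byte lengths) while preserving the upper six bits. The same operation in isolation:

    #include <cstdint>

    // lengthSizeMinusOne occupies the low 2 bits of hvcC byte 21.
    static uint8_t patchLengthSize(uint8_t b21, bool useNalLengthFour) {
        return (b21 & 0xfc) | (useNalLengthFour ? 3 : 1);
    }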
+
 void MPEG4Writer::Track::writeD263Box() {
     mOwner->beginBox("d263");
     mOwner->writeInt32(0);  // vendor
diff --git a/media/libstagefright/MediaBufferGroup.cpp b/media/libstagefright/MediaBufferGroup.cpp
deleted file mode 100644
index 6ac6d4a..0000000
--- a/media/libstagefright/MediaBufferGroup.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaBufferGroup"
-#include <utils/Log.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-
-namespace android {
-
-MediaBufferGroup::MediaBufferGroup()
-    : mFirstBuffer(NULL),
-      mLastBuffer(NULL) {
-}
-
-MediaBufferGroup::~MediaBufferGroup() {
-    MediaBuffer *next;
-    for (MediaBuffer *buffer = mFirstBuffer; buffer != NULL;
-         buffer = next) {
-        next = buffer->nextBuffer();
-
-        CHECK_EQ(buffer->refcount(), 0);
-
-        buffer->setObserver(NULL);
-        buffer->release();
-    }
-}
-
-void MediaBufferGroup::add_buffer(MediaBuffer *buffer) {
-    Mutex::Autolock autoLock(mLock);
-
-    buffer->setObserver(this);
-
-    if (mLastBuffer) {
-        mLastBuffer->setNextBuffer(buffer);
-    } else {
-        mFirstBuffer = buffer;
-    }
-
-    mLastBuffer = buffer;
-}
-
-status_t MediaBufferGroup::acquire_buffer(
-        MediaBuffer **out, bool nonBlocking) {
-    Mutex::Autolock autoLock(mLock);
-
-    for (;;) {
-        for (MediaBuffer *buffer = mFirstBuffer;
-             buffer != NULL; buffer = buffer->nextBuffer()) {
-            if (buffer->refcount() == 0) {
-                buffer->add_ref();
-                buffer->reset();
-
-                *out = buffer;
-                goto exit;
-            }
-        }
-
-        if (nonBlocking) {
-            *out = NULL;
-            return WOULD_BLOCK;
-        }
-
-        // All buffers are in use. Block until one of them is returned to us.
-        mCondition.wait(mLock);
-    }
-
-exit:
-    return OK;
-}
-
-void MediaBufferGroup::signalBufferReturned(MediaBuffer *) {
-    Mutex::Autolock autoLock(mLock);
-    mCondition.signal();
-}
-
-}  // namespace android
diff --git a/media/libstagefright/MediaClock.cpp b/media/libstagefright/MediaClock.cpp
index 2641e4e..3aa0061 100644
--- a/media/libstagefright/MediaClock.cpp
+++ b/media/libstagefright/MediaClock.cpp
@@ -25,6 +25,10 @@
 
 namespace android {
 
+// Maximum allowed backward jump in media time when the anchor changes.
+// Anything larger than this threshold is treated as a discontinuity.
+static const int64_t kAnchorFluctuationAllowedUs = 10000ll;
+
 MediaClock::MediaClock()
     : mAnchorTimeMediaUs(-1),
       mAnchorTimeRealUs(-1),
@@ -64,9 +68,20 @@
         ALOGW("reject anchor time since it leads to negative media time.");
         return;
     }
+
+    if (maxTimeMediaUs != -1) {
+        mMaxTimeMediaUs = maxTimeMediaUs;
+    }
+    if (mAnchorTimeRealUs != -1) {
+        int64_t oldNowMediaUs =
+            mAnchorTimeMediaUs + (nowUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
+        if (nowMediaUs < oldNowMediaUs
+                && nowMediaUs > oldNowMediaUs - kAnchorFluctuationAllowedUs) {
+            return;
+        }
+    }
     mAnchorTimeRealUs = nowUs;
     mAnchorTimeMediaUs = nowMediaUs;
-    mMaxTimeMediaUs = maxTimeMediaUs;
 }
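
The new guard projects the previous anchor forward to the current real time and ignores an update that would move media time backwards by less than kAnchorFluctuationAllowedUs; only a larger backward step is accepted as a genuine discontinuity. A condensed restatement of that decision (a sketch, not the class itself):

    #include <cstdint>

    bool shouldIgnoreAnchor(int64_t nowUs, int64_t nowMediaUs,
                            int64_t anchorRealUs, int64_t anchorMediaUs,
                            double playbackRate) {
        const int64_t kAnchorFluctuationAllowedUs = 10000ll;
        int64_t oldNowMediaUs =
                anchorMediaUs + (int64_t)((nowUs - anchorRealUs) * playbackRate);
        return nowMediaUs < oldNowMediaUs
                && nowMediaUs > oldNowMediaUs - kAnchorFluctuationAllowedUs;
    }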
 
 void MediaClock::updateMaxTimeMedia(int64_t maxTimeMediaUs) {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c2ffdf2..ff5c4d4 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -44,7 +44,6 @@
 #include <media/stagefright/MediaFilter.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
 #include <media/stagefright/PersistentSurface.h>
 #include <media/stagefright/SurfaceUtils.h>
 #include <mediautils/BatteryNotifier.h>
@@ -171,7 +170,7 @@
 
 // static
 sp<MediaCodec> MediaCodec::CreateByType(
-        const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err, pid_t pid) {
+        const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid) {
     sp<MediaCodec> codec = new MediaCodec(looper, pid);
 
     const status_t ret = codec->init(mime, true /* nameIsType */, encoder);
@@ -183,7 +182,7 @@
 
 // static
 sp<MediaCodec> MediaCodec::CreateByComponentName(
-        const sp<ALooper> &looper, const char *name, status_t *err, pid_t pid) {
+        const sp<ALooper> &looper, const AString &name, status_t *err, pid_t pid) {
     sp<MediaCodec> codec = new MediaCodec(looper, pid);
 
     const status_t ret = codec->init(name, false /* nameIsType */, false /* encoder */);
@@ -194,6 +193,22 @@
 }
 
 // static
+status_t MediaCodec::QueryCapabilities(
+        const AString &name, const AString &mime, bool isEncoder,
+        sp<MediaCodecInfo::Capabilities> *caps /* nonnull */) {
+    // TRICKY: this method is used by MediaCodecList/Info during its
+    // initialization. As such, we cannot create a MediaCodec instance
+    // because that requires an initialized MediaCodecList.
+
+    sp<CodecBase> codec = GetCodecBase(name);
+    if (codec == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return codec->queryCapabilities(name, mime, isEncoder, caps);
+}
+
+// static
 sp<PersistentSurface> MediaCodec::CreatePersistentInputSurface() {
     OMXClient client;
     CHECK_EQ(client.connect(), (status_t)OK);
@@ -299,6 +314,18 @@
     response->postReply(replyID);
 }
 
+//static
+sp<CodecBase> MediaCodec::GetCodecBase(const AString &name, bool nameIsType) {
+    // at this time only ACodec specifies a mime type.
+    if (nameIsType || name.startsWithIgnoreCase("omx.")) {
+        return new ACodec;
+    } else if (name.startsWithIgnoreCase("android.filter.")) {
+        return new MediaFilter;
+    } else {
+        return NULL;
+    }
+}
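
GetCodecBase centralizes the dispatch that init() previously inlined: a mime type or an "omx."-prefixed component name selects ACodec, an "android.filter."-prefixed name selects MediaFilter, and anything else is unsupported. A free-standing restatement of the rule (hypothetical helper, since the real method is private to MediaCodec):

    enum class CodecKind { kACodec, kMediaFilter, kUnknown };

    CodecKind classifyCodecName(const AString &name, bool nameIsType) {
        if (nameIsType || name.startsWithIgnoreCase("omx.")) {
            return CodecKind::kACodec;
        }
        if (name.startsWithIgnoreCase("android.filter.")) {
            return CodecKind::kMediaFilter;
        }
        return CodecKind::kUnknown;  // init() maps this to NAME_NOT_FOUND
    }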
+
 status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) {
     mResourceManagerService->init();
 
@@ -312,12 +339,8 @@
     // we need to invest in an extra looper to free the main event
     // queue.
 
-    if (nameIsType || !strncasecmp(name.c_str(), "omx.", 4)) {
-        mCodec = new ACodec;
-    } else if (!nameIsType
-            && !strncasecmp(name.c_str(), "android.filter.", 15)) {
-        mCodec = new MediaFilter;
-    } else {
+    mCodec = GetCodecBase(name, nameIsType);
+    if (mCodec == NULL) {
         return NAME_NOT_FOUND;
     }
 
@@ -376,9 +399,11 @@
 
     status_t err;
     Vector<MediaResource> resources;
-    const char *type = secureCodec ? kResourceSecureCodec : kResourceNonSecureCodec;
-    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
-    resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+    MediaResource::Type type =
+            secureCodec ? MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
+    MediaResource::SubType subtype =
+            mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
+    resources.push_back(MediaResource(type, subtype, 1));
     for (int i = 0; i <= kMaxRetry; ++i) {
         if (i > 0) {
             // Don't try to reclaim resource for the first time.
@@ -423,6 +448,13 @@
         if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
             mRotationDegrees = 0;
         }
+
+        // Prevent possible integer overflow in downstream code.
+        if (mInitIsEncoder
+                && (uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
+            ALOGE("buffer size is too big, width=%d, height=%d", mVideoWidth, mVideoHeight);
+            return BAD_VALUE;
+        }
     }
 
     msg->setMessage("format", format);
@@ -438,13 +470,14 @@
 
     status_t err;
     Vector<MediaResource> resources;
-    const char *type = (mFlags & kFlagIsSecure) ?
-            kResourceSecureCodec : kResourceNonSecureCodec;
-    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
-    resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+    MediaResource::Type type = (mFlags & kFlagIsSecure) ?
+            MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
+    MediaResource::SubType subtype =
+            mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
+    resources.push_back(MediaResource(type, subtype, 1));
     // Don't know the buffer size at this point, but it's fine to use 1 because
     // the reclaimResource call doesn't consider the requester's buffer size for now.
-    resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+    resources.push_back(MediaResource(MediaResource::kGraphicMemory, 1));
     for (int i = 0; i <= kMaxRetry; ++i) {
         if (i > 0) {
             // Don't try to reclaim resource for the first time.
@@ -523,7 +556,8 @@
     return size;
 }
 
-void MediaCodec::addResource(const String8 &type, const String8 &subtype, uint64_t value) {
+void MediaCodec::addResource(
+        MediaResource::Type type, MediaResource::SubType subtype, uint64_t value) {
     Vector<MediaResource> resources;
     resources.push_back(MediaResource(type, subtype, value));
     mResourceManagerService->addResource(
@@ -535,13 +569,14 @@
 
     status_t err;
     Vector<MediaResource> resources;
-    const char *type = (mFlags & kFlagIsSecure) ?
-            kResourceSecureCodec : kResourceNonSecureCodec;
-    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
-    resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+    MediaResource::Type type = (mFlags & kFlagIsSecure) ?
+            MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
+    MediaResource::SubType subtype =
+            mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
+    resources.push_back(MediaResource(type, subtype, 1));
     // Don't know the buffer size at this point, but it's fine to use 1 because
     // the reclaimResource call doesn't consider the requester's buffer size for now.
-    resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+    resources.push_back(MediaResource(MediaResource::kGraphicMemory, 1));
     for (int i = 0; i <= kMaxRetry; ++i) {
         if (i > 0) {
             // Don't try to reclaim resource for the first time.
@@ -681,6 +716,7 @@
         const uint8_t key[16],
         const uint8_t iv[16],
         CryptoPlugin::Mode mode,
+        const CryptoPlugin::Pattern &pattern,
         int64_t presentationTimeUs,
         uint32_t flags,
         AString *errorDetailMsg) {
@@ -696,6 +732,8 @@
     msg->setPointer("key", (void *)key);
     msg->setPointer("iv", (void *)iv);
     msg->setInt32("mode", mode);
+    msg->setInt32("encryptBlocks", pattern.mEncryptBlocks);
+    msg->setInt32("skipBlocks", pattern.mSkipBlocks);
     msg->setInt64("timeUs", presentationTimeUs);
     msg->setInt32("flags", flags);
     msg->setPointer("errorDetailMsg", errorDetailMsg);
@@ -873,33 +911,54 @@
         size_t portIndex, size_t index,
         sp<ABuffer> *buffer, sp<AMessage> *format) {
     // use mutex instead of a context switch
-
     if (mReleasedByResourceManager) {
+        ALOGE("getBufferAndFormat - resource already released");
         return DEAD_OBJECT;
     }
 
+    if (buffer == NULL) {
+        ALOGE("getBufferAndFormat - null ABuffer");
+        return INVALID_OPERATION;
+    }
+
+    if (format == NULL) {
+        ALOGE("getBufferAndFormat - null AMessage");
+        return INVALID_OPERATION;
+    }
+
     buffer->clear();
     format->clear();
+
     if (!isExecuting()) {
+        ALOGE("getBufferAndFormat - not executing");
         return INVALID_OPERATION;
     }
 
     // we do not want mPortBuffers to change during this section
     // we also don't want mOwnedByClient to change during this
     Mutex::Autolock al(mBufferLock);
+
     Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
-    if (index < buffers->size()) {
-        const BufferInfo &info = buffers->itemAt(index);
-        if (info.mOwnedByClient) {
-            // by the time buffers array is initialized, crypto is set
-            if (portIndex == kPortIndexInput && mCrypto != NULL) {
-                *buffer = info.mEncryptedData;
-            } else {
-                *buffer = info.mData;
-            }
-            *format = info.mFormat;
-        }
+    if (index >= buffers->size()) {
+        ALOGE("getBufferAndFormat - trying to get buffer with "
+              "bad index (index=%zu buffer_size=%zu)", index, buffers->size());
+        return INVALID_OPERATION;
     }
+
+    const BufferInfo &info = buffers->itemAt(index);
+    if (!info.mOwnedByClient) {
+        ALOGE("getBufferAndFormat - invalid operation "
+              "(the index %zu is not owned by client)", index);
+        return INVALID_OPERATION;
+    }
+
+    // by the time buffers array is initialized, crypto is set
+    *buffer = (portIndex == kPortIndexInput && mCrypto != NULL) ?
+                  info.mEncryptedData :
+                  info.mData;
+
+    *format = info.mFormat;
+
     return OK;
 }
 
@@ -1174,18 +1233,18 @@
                         mFlags &= ~kFlagUsesSoftwareRenderer;
                     }
 
-                    String8 resourceType;
+                    MediaResource::Type resourceType;
                     if (mComponentName.endsWith(".secure")) {
                         mFlags |= kFlagIsSecure;
-                        resourceType = String8(kResourceSecureCodec);
+                        resourceType = MediaResource::kSecureCodec;
                     } else {
                         mFlags &= ~kFlagIsSecure;
-                        resourceType = String8(kResourceNonSecureCodec);
+                        resourceType = MediaResource::kNonSecureCodec;
                     }
 
                     if (mIsVideo) {
                         // audio codec is currently ignored.
-                        addResource(resourceType, String8(kResourceVideoCodec), 1);
+                        addResource(resourceType, MediaResource::kVideoCodec, 1);
                     }
 
                     (new AMessage)->postReply(mReplyID);
@@ -1207,7 +1266,10 @@
 
                     CHECK(msg->findMessage("input-format", &mInputFormat));
                     CHECK(msg->findMessage("output-format", &mOutputFormat));
-
+                    ALOGV("[%s] configured as input format: %s, output format: %s",
+                            mComponentName.c_str(),
+                            mInputFormat->debugString(4).c_str(),
+                            mOutputFormat->debugString(4).c_str());
                     int32_t usingSwRenderer;
                     if (mOutputFormat->findInt32("using-sw-renderer", &usingSwRenderer)
                             && usingSwRenderer) {
@@ -1226,6 +1288,12 @@
                     if (!msg->findInt32("err", &err)) {
                         sp<RefBase> obj;
                         msg->findObject("input-surface", &obj);
+                        CHECK(msg->findMessage("input-format", &mInputFormat));
+                        CHECK(msg->findMessage("output-format", &mOutputFormat));
+                        ALOGV("[%s] input surface created as input format: %s, output format: %s",
+                                mComponentName.c_str(),
+                                mInputFormat->debugString(4).c_str(),
+                                mOutputFormat->debugString(4).c_str());
                         CHECK(obj != NULL);
                         response->setObject("input-surface", obj);
                         mHaveInputSurface = true;
@@ -1303,6 +1371,8 @@
                         info.mBufferID = portDesc->bufferIDAt(i);
                         info.mOwnedByClient = false;
                         info.mData = portDesc->bufferAt(i);
+                        info.mNativeHandle = portDesc->handleAt(i);
+                        info.mMemRef = portDesc->memRefAt(i);
 
                         if (portIndex == kPortIndexInput && mCrypto != NULL) {
                             sp<IMemory> mem = mDealer->allocate(info.mData->capacity());
@@ -1320,10 +1390,9 @@
                             // allocating input buffers, so this is a good
                             // indication that now all buffers are allocated.
                             if (mIsVideo) {
-                                String8 subtype;
                                 addResource(
-                                        String8(kResourceGraphicMemory),
-                                        subtype,
+                                        MediaResource::kGraphicMemory,
+                                        MediaResource::kUnspecifiedSubType,
                                         getGraphicBufferSize());
                             }
                             setState(STARTED);
@@ -1338,21 +1407,34 @@
 
                 case CodecBase::kWhatOutputFormatChanged:
                 {
-                    ALOGV("codec output format changed");
+                    CHECK(msg->findMessage("format", &mOutputFormat));
+
+                    ALOGV("[%s] output format changed to: %s",
+                            mComponentName.c_str(), mOutputFormat->debugString(4).c_str());
 
                     if (mSoftRenderer == NULL &&
                             mSurface != NULL &&
                             (mFlags & kFlagUsesSoftwareRenderer)) {
                         AString mime;
-                        CHECK(msg->findString("mime", &mime));
+                        CHECK(mOutputFormat->findString("mime", &mime));
+
+                        // TODO: propagate color aspects to software renderer to allow better
+                        // color conversion to RGB. For now, just mark dataspace for YUV
+                        // rendering.
+                        int32_t dataSpace;
+                        if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
+                            ALOGD("[%s] setting dataspace on output surface to #%x",
+                                    mComponentName.c_str(), dataSpace);
+                            int err = native_window_set_buffers_data_space(
+                                    mSurface.get(), (android_dataspace)dataSpace);
+                            ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
+                        }
 
                         if (mime.startsWithIgnoreCase("video/")) {
                             mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
                         }
                     }
 
-                    mOutputFormat = msg;
-
                     if (mFlags & kFlagIsEncoder) {
                         // Before we announce the format change we should
                         // collect codec specific data and amend the output
@@ -1709,9 +1791,8 @@
                         err = BAD_VALUE;
                     } else {
                         err = connectToSurface(surface);
-                        if (err == BAD_VALUE) {
-                            // assuming reconnecting to same surface
-                            // TODO: check if it is the same surface
+                        if (err == ALREADY_EXISTS) {
+                            // reconnecting to same surface
                             err = OK;
                         } else {
                             if (err == OK) {
@@ -1875,7 +1956,7 @@
             mCodec->initiateShutdown(
                     msg->what() == kWhatStop /* keepComponentAllocated */);
 
-            returnBuffersToCodec();
+            returnBuffersToCodec(reclaimed);
 
             if (mSoftRenderer != NULL && (mFlags & kFlagPushBlankBuffersOnShutdown)) {
                 pushBlankBuffersToNativeWindow(mSurface.get());
@@ -2200,6 +2281,9 @@
         if (!format->findBuffer(AStringPrintf("csd-%u", i).c_str(), &csd)) {
             break;
         }
+        if (csd->size() == 0) {
+            ALOGW("csd-%zu size is 0", i);
+        }
 
         mCSD.push_back(csd);
         ++i;
@@ -2278,12 +2362,12 @@
     updateBatteryStat();
 }
 
-void MediaCodec::returnBuffersToCodec() {
-    returnBuffersToCodecOnPort(kPortIndexInput);
-    returnBuffersToCodecOnPort(kPortIndexOutput);
+void MediaCodec::returnBuffersToCodec(bool isReclaim) {
+    returnBuffersToCodecOnPort(kPortIndexInput, isReclaim);
+    returnBuffersToCodecOnPort(kPortIndexOutput, isReclaim);
 }
 
-void MediaCodec::returnBuffersToCodecOnPort(int32_t portIndex) {
+void MediaCodec::returnBuffersToCodecOnPort(int32_t portIndex, bool isReclaim) {
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
     Mutex::Autolock al(mBufferLock);
 
@@ -2295,7 +2379,13 @@
         if (info->mNotify != NULL) {
             sp<AMessage> msg = info->mNotify;
             info->mNotify = NULL;
-            info->mOwnedByClient = false;
+            if (isReclaim && info->mOwnedByClient) {
+                ALOGD("port %d buffer %zu still owned by client when codec is reclaimed",
+                        portIndex, i);
+            } else {
+                info->mMemRef = NULL;
+                info->mOwnedByClient = false;
+            }
 
             if (portIndex == kPortIndexInput) {
                 /* no error, just returning buffers */
@@ -2357,6 +2447,7 @@
     // We allow the simpler queueInputBuffer API to be used even in
     // secure mode, by fabricating a single unencrypted subSample.
     CryptoPlugin::SubSample ss;
+    CryptoPlugin::Pattern pattern;
 
     if (msg->findSize("size", &size)) {
         if (mCrypto != NULL) {
@@ -2367,6 +2458,8 @@
             numSubSamples = 1;
             key = NULL;
             iv = NULL;
+            pattern.mEncryptBlocks = 0;
+            pattern.mSkipBlocks = 0;
         }
     } else {
         if (mCrypto == NULL) {
@@ -2377,6 +2470,8 @@
         CHECK(msg->findSize("numSubSamples", &numSubSamples));
         CHECK(msg->findPointer("key", (void **)&key));
         CHECK(msg->findPointer("iv", (void **)&iv));
+        CHECK(msg->findInt32("encryptBlocks", (int32_t *)&pattern.mEncryptBlocks));
+        CHECK(msg->findInt32("skipBlocks", (int32_t *)&pattern.mSkipBlocks));
 
         int32_t tmp;
         CHECK(msg->findInt32("mode", &tmp));
@@ -2424,16 +2519,27 @@
         AString *errorDetailMsg;
         CHECK(msg->findPointer("errorDetailMsg", (void **)&errorDetailMsg));
 
+        void *dst_pointer = info->mData->base();
+        ICrypto::DestinationType dst_type = ICrypto::kDestinationTypeOpaqueHandle;
+
+        if (info->mNativeHandle != NULL) {
+            dst_pointer = (void *)info->mNativeHandle->handle();
+            dst_type = ICrypto::kDestinationTypeNativeHandle;
+        } else if ((mFlags & kFlagIsSecure) == 0) {
+            dst_type = ICrypto::kDestinationTypeVmPointer;
+        }
+
         ssize_t result = mCrypto->decrypt(
-                (mFlags & kFlagIsSecure) != 0,
+                dst_type,
                 key,
                 iv,
                 mode,
+                pattern,
                 info->mSharedEncryptedBuffer,
                 offset,
                 subSamples,
                 numSubSamples,
-                info->mData->base(),
+                dst_pointer,
                 errorDetailMsg);
 
         if (result < 0) {
@@ -2576,11 +2682,17 @@
 status_t MediaCodec::connectToSurface(const sp<Surface> &surface) {
     status_t err = OK;
     if (surface != NULL) {
+        uint64_t oldId, newId;
+        if (mSurface != NULL
+                && surface->getUniqueId(&newId) == NO_ERROR
+                && mSurface->getUniqueId(&oldId) == NO_ERROR
+                && newId == oldId) {
+            ALOGI("[%s] connecting to the same surface. Nothing to do.", mComponentName.c_str());
+            return ALREADY_EXISTS;
+        }
+
         err = native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
-        if (err == BAD_VALUE) {
-            ALOGI("native window already connected. Assuming no change of surface");
-            return err;
-        } else if (err == OK) {
+        if (err == OK) {
             // Require a fresh set of buffers after each connect by using a unique generation
             // number. Rely on the fact that max supported process id by Linux is 2^22.
             // PID is never 0 so we don't have to worry that we use the default generation of 0.
@@ -2602,7 +2714,8 @@
             ALOGE("native_window_api_connect returned an error: %s (%d)", strerror(-err), err);
         }
     }
-    return err;
+    // do not return ALREADY_EXISTS unless surfaces are the same
+    return err == ALREADY_EXISTS ? BAD_VALUE : err;
 }
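
After this change ALREADY_EXISTS has one precise meaning for callers (see the kWhatSetSurface handling earlier in this file): it is returned only when reconnecting to the very same surface. If native_window_api_connect itself reports ALREADY_EXISTS, a different producer owns the surface, so it is remapped to BAD_VALUE. A sketch of the resulting contract, as a hypothetical helper:

    // Restatement of the return contract established above.
    status_t mapConnectResult(status_t err, bool sameSurface) {
        if (sameSurface) {
            return ALREADY_EXISTS;                       // "nothing to do"
        }
        return err == ALREADY_EXISTS ? BAD_VALUE : err;  // foreign producer attached
    }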
 
 status_t MediaCodec::disconnectFromSurface() {
@@ -2789,25 +2902,15 @@
 }
 
 void MediaCodec::updateBatteryStat() {
+    if (!mIsVideo) {
+        return;
+    }
+
     if (mState == CONFIGURED && !mBatteryStatNotified) {
-        BatteryNotifier& notifier(BatteryNotifier::getInstance());
-
-        if (mIsVideo) {
-            notifier.noteStartVideo();
-        } else {
-            notifier.noteStartAudio();
-        }
-
+        BatteryNotifier::getInstance().noteStartVideo();
         mBatteryStatNotified = true;
     } else if (mState == UNINITIALIZED && mBatteryStatNotified) {
-        BatteryNotifier& notifier(BatteryNotifier::getInstance());
-
-        if (mIsVideo) {
-            notifier.noteStopVideo();
-        } else {
-            notifier.noteStopAudio();
-        }
-
+        BatteryNotifier::getInstance().noteStopVideo();
         mBatteryStatNotified = false;
     }
 }
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 5edc04c..0fb5072 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -30,16 +30,17 @@
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/ACodec.h>
+#include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
 
 #include <sys/stat.h>
 #include <utils/threads.h>
 
 #include <cutils/properties.h>
-#include <libexpat/expat.h>
+#include <expat.h>
 
 namespace android {
 
@@ -751,15 +752,19 @@
     ALOGV("initializeCapabilities %s:%s",
             mCurrentInfo->mName.c_str(), type);
 
-    CodecCapabilities caps;
-    status_t err = QueryCodec(
-            mOMX,
-            mCurrentInfo->mName.c_str(),
+    sp<MediaCodecInfo::Capabilities> caps;
+    status_t err = MediaCodec::QueryCapabilities(
+            mCurrentInfo->mName,
             type,
             mCurrentInfo->mIsEncoder,
             &caps);
     if (err != OK) {
         return err;
+    } else if (caps == NULL) {
+        ALOGE("MediaCodec::QueryCapabilities returned OK but no capabilities for '%s':'%s':'%s'",
+                mCurrentInfo->mName.c_str(), type,
+                mCurrentInfo->mIsEncoder ? "encoder" : "decoder");
+        return UNKNOWN_ERROR;
     }
 
     return mCurrentInfo->initializeCapabilities(caps);
@@ -1115,4 +1120,85 @@
     return mGlobalSettings;
 }
 
+//static
+bool MediaCodecList::isSoftwareCodec(const AString &componentName) {
+    return componentName.startsWithIgnoreCase("OMX.google.")
+        || !componentName.startsWithIgnoreCase("OMX.");
+}
+
+static int compareSoftwareCodecsFirst(const AString *name1, const AString *name2) {
+    // sort order 1: software codecs are first (lower)
+    bool isSoftwareCodec1 = MediaCodecList::isSoftwareCodec(*name1);
+    bool isSoftwareCodec2 = MediaCodecList::isSoftwareCodec(*name2);
+    if (isSoftwareCodec1 != isSoftwareCodec2) {
+        return isSoftwareCodec2 - isSoftwareCodec1;
+    }
+
+    // sort order 2: OMX codecs are first (lower)
+    bool isOMX1 = name1->startsWithIgnoreCase("OMX.");
+    bool isOMX2 = name2->startsWithIgnoreCase("OMX.");
+    return isOMX2 - isOMX1;
+}
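
The comparator leans on bool-to-int promotion: subtracting two bools yields -1, 0 or 1, exactly the contract a sort comparator needs, so software codecs (and, among those, OMX names) sort first. A quick standalone check of the ordering claim:

    #include <cassert>

    int main() {
        bool soft = true, hard = false;
        assert(hard - soft == -1);  // software name compares lower, sorts first
        assert(soft - soft == 0);   // tie falls through to the OMX-prefix rule
        return 0;
    }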
+
+//static
+void MediaCodecList::findMatchingCodecs(
+        const char *mime, bool encoder, uint32_t flags, Vector<AString> *matches) {
+    matches->clear();
+
+    const sp<IMediaCodecList> list = getInstance();
+    if (list == NULL) {
+        return;
+    }
+
+    size_t index = 0;
+    for (;;) {
+        ssize_t matchIndex =
+            list->findCodecByType(mime, encoder, index);
+
+        if (matchIndex < 0) {
+            break;
+        }
+
+        index = matchIndex + 1;
+
+        const sp<MediaCodecInfo> info = list->getCodecInfo(matchIndex);
+        CHECK(info != NULL);
+        AString componentName = info->getCodecName();
+
+        if (!((flags & kHardwareCodecsOnly) && !isSoftwareCodec(componentName))) {
+            matches->push(componentName);
+            ALOGV("matching '%s'", componentName.c_str());
+        }
+    }
+
+    if (flags & kPreferSoftwareCodecs) {
+        matches->sort(compareSoftwareCodecsFirst);
+    }
+}
+
+// static
+uint32_t MediaCodecList::getQuirksFor(const char *componentName) {
+    const sp<IMediaCodecList> list = getInstance();
+    if (list == NULL) {
+        return 0;
+    }
+
+    ssize_t ix = list->findCodecByName(componentName);
+    if (ix < 0) {
+        return 0;
+    }
+
+    const sp<MediaCodecInfo> info = list->getCodecInfo(ix);
+
+    uint32_t quirks = 0;
+    if (info->hasQuirk("requires-allocate-on-input-ports")) {
+        quirks |= ACodec::kRequiresAllocateBufferOnInputPorts;
+    }
+    if (info->hasQuirk("requires-allocate-on-output-ports")) {
+        quirks |= ACodec::kRequiresAllocateBufferOnOutputPorts;
+    }
+
+    return quirks;
+}
+
 }  // namespace android
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 7f9f824..0aafa6bd 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -30,6 +30,7 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaCodecSource.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
@@ -39,18 +40,24 @@
 
 namespace android {
 
-const int kDefaultSwVideoEncoderFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
-const int kDefaultSwVideoEncoderDataSpace = HAL_DATASPACE_BT709;
+const int32_t kDefaultSwVideoEncoderFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
+const int32_t kDefaultHwVideoEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+const int32_t kDefaultVideoEncoderDataSpace = HAL_DATASPACE_V0_BT709;
+
+const int kStopTimeoutUs = 300000; // allow 300 ms for shutting down the encoder
 
 struct MediaCodecSource::Puller : public AHandler {
     Puller(const sp<MediaSource> &source);
 
+    void interruptSource();
     status_t start(const sp<MetaData> &meta, const sp<AMessage> &notify);
     void stop();
-
+    void stopSource();
     void pause();
     void resume();
 
+    bool readBuffer(MediaBuffer **buffer);
+
 protected:
     virtual void onMessageReceived(const sp<AMessage> &msg);
     virtual ~Puller();
@@ -60,17 +67,31 @@
         kWhatStart = 'msta',
         kWhatStop,
         kWhatPull,
-        kWhatPause,
-        kWhatResume,
     };
 
     sp<MediaSource> mSource;
     sp<AMessage> mNotify;
     sp<ALooper> mLooper;
-    int32_t mPullGeneration;
     bool mIsAudio;
-    bool mPaused;
-    bool mReachedEOS;
+
+    struct Queue {
+        Queue()
+            : mReadPendingSince(0),
+              mPaused(false),
+              mPulling(false) { }
+        int64_t mReadPendingSince;
+        bool mPaused;
+        bool mPulling;
+        Vector<MediaBuffer *> mReadBuffers;
+
+        void flush();
+        // If the queue is empty, return false and set *buffer to NULL. Otherwise,
+        // pop the front buffer off the queue, place it into *buffer and return true.
+        bool readBuffer(MediaBuffer **buffer);
+        // add a buffer to the back of the queue
+        void pushBuffer(MediaBuffer *mbuf);
+    };
+    Mutexed<Queue> mQueue;
 
     status_t postSynchronouslyAndReturnError(const sp<AMessage> &msg);
     void schedulePull();
@@ -82,10 +103,8 @@
 MediaCodecSource::Puller::Puller(const sp<MediaSource> &source)
     : mSource(source),
       mLooper(new ALooper()),
-      mPullGeneration(0),
-      mIsAudio(false),
-      mPaused(false),
-      mReachedEOS(false) {
+      mIsAudio(false)
+{
     sp<MetaData> meta = source->getFormat();
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -100,6 +119,33 @@
     mLooper->stop();
 }
 
+void MediaCodecSource::Puller::Queue::pushBuffer(MediaBuffer *mbuf) {
+    mReadBuffers.push_back(mbuf);
+}
+
+bool MediaCodecSource::Puller::Queue::readBuffer(MediaBuffer **mbuf) {
+    if (mReadBuffers.empty()) {
+        *mbuf = NULL;
+        return false;
+    }
+    *mbuf = *mReadBuffers.begin();
+    mReadBuffers.erase(mReadBuffers.begin());
+    return true;
+}
+
+void MediaCodecSource::Puller::Queue::flush() {
+    MediaBuffer *mbuf;
+    while (readBuffer(&mbuf)) {
+        // there are no null buffers in the queue
+        mbuf->release();
+    }
+}
+
+bool MediaCodecSource::Puller::readBuffer(MediaBuffer **mbuf) {
+    Mutexed<Queue>::Locked queue(mQueue);
+    return queue->readBuffer(mbuf);
+}
+
 status_t MediaCodecSource::Puller::postSynchronouslyAndReturnError(
         const sp<AMessage> &msg) {
     sp<AMessage> response;
@@ -116,8 +162,7 @@
     return err;
 }
 
-status_t MediaCodecSource::Puller::start(const sp<MetaData> &meta,
-        const sp<AMessage> &notify) {
+status_t MediaCodecSource::Puller::start(const sp<MetaData> &meta, const sp<AMessage> &notify) {
     ALOGV("puller (%s) start", mIsAudio ? "audio" : "video");
     mLooper->start(
             false /* runOnCallingThread */,
@@ -132,41 +177,51 @@
 }
 
 void MediaCodecSource::Puller::stop() {
-    // Stop source from caller's thread instead of puller's looper.
-    // mSource->stop() is thread-safe, doing it outside the puller's
-    // looper allows us to at least stop if source gets stuck.
-    // If source gets stuck in read(), the looper would never
-    // be able to process the stop(), which could lead to ANR.
+    bool interrupt = false;
+    {
+        // mark stopping before actually reaching kWhatStop on the looper, so the pulling will
+        // stop.
+        Mutexed<Queue>::Locked queue(mQueue);
+        queue->mPulling = false;
+        interrupt = queue->mReadPendingSince && (queue->mReadPendingSince < ALooper::GetNowUs() - 1000000);
+        queue->flush(); // flush any unprocessed pulled buffers
+    }
 
-    ALOGV("source (%s) stopping", mIsAudio ? "audio" : "video");
+    if (interrupt) {
+        interruptSource();
+    }
+}
+
+void MediaCodecSource::Puller::interruptSource() {
+    // call source->stop() if a read has been pending for over a second.
+    // We have to call this outside the looper, as the looper may be blocked on that read.
     mSource->stop();
-    ALOGV("source (%s) stopped", mIsAudio ? "audio" : "video");
+}
 
-    (new AMessage(kWhatStop, this))->post();
+void MediaCodecSource::Puller::stopSource() {
+    sp<AMessage> msg = new AMessage(kWhatStop, this);
+    (void)postSynchronouslyAndReturnError(msg);
 }
 
 void MediaCodecSource::Puller::pause() {
-    (new AMessage(kWhatPause, this))->post();
+    Mutexed<Queue>::Locked queue(mQueue);
+    queue->mPaused = true;
 }
 
 void MediaCodecSource::Puller::resume() {
-    (new AMessage(kWhatResume, this))->post();
+    Mutexed<Queue>::Locked queue(mQueue);
+    queue->mPaused = false;
 }
 
 void MediaCodecSource::Puller::schedulePull() {
-    sp<AMessage> msg = new AMessage(kWhatPull, this);
-    msg->setInt32("generation", mPullGeneration);
-    msg->post();
+    (new AMessage(kWhatPull, this))->post();
 }
 
 void MediaCodecSource::Puller::handleEOS() {
-    if (!mReachedEOS) {
-        ALOGV("puller (%s) posting EOS", mIsAudio ? "audio" : "video");
-        mReachedEOS = true;
-        sp<AMessage> notify = mNotify->dup();
-        notify->setPointer("accessUnit", NULL);
-        notify->post();
-    }
+    ALOGV("puller (%s) posting EOS", mIsAudio ? "audio" : "video");
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("eos", 1);
+    msg->post();
 }
 
 void MediaCodecSource::Puller::onMessageReceived(const sp<AMessage> &msg) {
@@ -176,7 +231,10 @@
             sp<RefBase> obj;
             CHECK(msg->findObject("meta", &obj));
 
-            mReachedEOS = false;
+            {
+                Mutexed<Queue>::Locked queue(mQueue);
+                queue->mPulling = true;
+            }
 
             status_t err = mSource->start(static_cast<MetaData *>(obj.get()));
 
@@ -195,61 +253,60 @@
 
         case kWhatStop:
         {
-            ++mPullGeneration;
+            mSource->stop();
 
-            handleEOS();
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", OK);
+
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            response->postReply(replyID);
             break;
         }
 
         case kWhatPull:
         {
-            int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
-
-            if (generation != mPullGeneration) {
+            Mutexed<Queue>::Locked queue(mQueue);
+            queue->mReadPendingSince = ALooper::GetNowUs();
+            if (!queue->mPulling) {
+                handleEOS();
                 break;
             }
 
-            MediaBuffer *mbuf;
+            queue.unlock();
+            MediaBuffer *mbuf = NULL;
             status_t err = mSource->read(&mbuf);
+            queue.lock();
 
-            if (mPaused) {
-                if (err == OK) {
+            queue->mReadPendingSince = 0;
+            // if we need to discard buffer
+            if (!queue->mPulling || queue->mPaused || err != OK) {
+                if (mbuf != NULL) {
                     mbuf->release();
                     mbuf = NULL;
                 }
-
-                msg->post();
-                break;
-            }
-
-            if (err != OK) {
-                if (err == ERROR_END_OF_STREAM) {
+                if (queue->mPulling && err == OK) {
+                    msg->post(); // if simply paused, keep pulling source
+                    break;
+                } else if (err == ERROR_END_OF_STREAM) {
                     ALOGV("stream ended, mbuf %p", mbuf);
-                } else {
+                } else if (err != OK) {
                     ALOGE("error %d reading stream.", err);
                 }
-                handleEOS();
-            } else {
-                sp<AMessage> notify = mNotify->dup();
-
-                notify->setPointer("accessUnit", mbuf);
-                notify->post();
-
-                msg->post();
             }
-            break;
-        }
 
-        case kWhatPause:
-        {
-            mPaused = true;
-            break;
-        }
+            if (mbuf != NULL) {
+                queue->pushBuffer(mbuf);
+            }
 
-        case kWhatResume:
-        {
-            mPaused = false;
+            queue.unlock();
+
+            if (mbuf != NULL) {
+                mNotify->post();
+                msg->post();
+            } else {
+                handleEOS();
+            }
             break;
         }
 
@@ -258,6 +315,11 @@
     }
 }
 
+MediaCodecSource::Output::Output()
+    : mEncoderReachedEOS(false),
+      mErrorCode(OK) {
+}
+
 // static
 sp<MediaCodecSource> MediaCodecSource::Create(
         const sp<ALooper> &looper,
@@ -274,6 +336,12 @@
     return NULL;
 }
 
+void MediaCodecSource::setInputBufferTimeOffset(int64_t timeOffsetUs) {
+    sp<AMessage> msg = new AMessage(kWhatSetInputBufferTimeOffset, mReflector);
+    msg->setInt64("time-offset-us", timeOffsetUs);
+    postSynchronouslyAndReturnError(msg);
+}
+
 status_t MediaCodecSource::start(MetaData* params) {
     sp<AMessage> msg = new AMessage(kWhatStart, mReflector);
     msg->setObject("meta", params);
@@ -282,21 +350,7 @@
 
 status_t MediaCodecSource::stop() {
     sp<AMessage> msg = new AMessage(kWhatStop, mReflector);
-    status_t err = postSynchronouslyAndReturnError(msg);
-
-    // mPuller->stop() needs to be done outside MediaCodecSource's looper,
-    // as it contains a synchronous call to stop the underlying MediaSource,
-    // which often waits for all outstanding MediaBuffers to return, but
-    // MediaBuffers are only returned when MediaCodecSource looper gets
-    // to process them.
-
-    if (mPuller != NULL) {
-        ALOGI("puller (%s) stopping", mIsVideo ? "video" : "audio");
-        mPuller->stop();
-        ALOGI("puller (%s) stopped", mIsVideo ? "video" : "audio");
-    }
-
-    return err;
+    return postSynchronouslyAndReturnError(msg);
 }
 
 status_t MediaCodecSource::pause() {
@@ -304,6 +358,11 @@
     return OK;
 }
 
+sp<MetaData> MediaCodecSource::getFormat() {
+    Mutexed<sp<MetaData>>::Locked meta(mMeta);
+    return *meta;
+}
+
 sp<IGraphicBufferProducer> MediaCodecSource::getGraphicBufferProducer() {
     CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
     return mGraphicBufferProducer;
@@ -311,18 +370,18 @@
 
 status_t MediaCodecSource::read(
         MediaBuffer** buffer, const ReadOptions* /* options */) {
-    Mutex::Autolock autolock(mOutputBufferLock);
+    Mutexed<Output>::Locked output(mOutput);
 
     *buffer = NULL;
-    while (mOutputBufferQueue.size() == 0 && !mEncoderReachedEOS) {
-        mOutputBufferCond.wait(mOutputBufferLock);
+    while (output->mBufferQueue.size() == 0 && !output->mEncoderReachedEOS) {
+        output.waitForCondition(output->mCond);
     }
-    if (!mEncoderReachedEOS) {
-        *buffer = *mOutputBufferQueue.begin();
-        mOutputBufferQueue.erase(mOutputBufferQueue.begin());
+    if (!output->mEncoderReachedEOS) {
+        *buffer = *output->mBufferQueue.begin();
+        output->mBufferQueue.erase(output->mBufferQueue.begin());
         return OK;
     }
-    return mErrorCode;
+    return output->mErrorCode;
 }
 
 void MediaCodecSource::signalBufferReturned(MediaBuffer *buffer) {
@@ -348,9 +407,9 @@
       mEncoderFormat(0),
       mEncoderDataSpace(0),
       mGraphicBufferConsumer(consumer),
+      mInputBufferTimeOffsetUs(0),
       mFirstSampleTimeUs(-1ll),
-      mEncoderReachedEOS(false),
-      mErrorCode(OK) {
+      mGeneration(0) {
     CHECK(mLooper != NULL);
 
     AString mime;
@@ -390,10 +449,6 @@
     mCodecLooper->setName("codec_looper");
     mCodecLooper->start();
 
-    if (mFlags & FLAG_USE_METADATA_INPUT) {
-        mOutputFormat->setInt32("store-metadata-in-buffers", 1);
-    }
-
     if (mFlags & FLAG_USE_SURFACE_INPUT) {
         mOutputFormat->setInt32("create-input-buffers-suspended", 1);
     }
@@ -401,30 +456,47 @@
     AString outputMIME;
     CHECK(mOutputFormat->findString("mime", &outputMIME));
 
-    mEncoder = MediaCodec::CreateByType(
-            mCodecLooper, outputMIME.c_str(), true /* encoder */);
+    Vector<AString> matchingCodecs;
+    MediaCodecList::findMatchingCodecs(
+            outputMIME.c_str(), true /* encoder */,
+            ((mFlags & FLAG_PREFER_SOFTWARE_CODEC) ? MediaCodecList::kPreferSoftwareCodecs : 0),
+            &matchingCodecs);
 
-    if (mEncoder == NULL) {
-        return NO_INIT;
+    status_t err = NO_INIT;
+    for (size_t ix = 0; ix < matchingCodecs.size(); ++ix) {
+        mEncoder = MediaCodec::CreateByComponentName(
+                mCodecLooper, matchingCodecs[ix]);
+
+        if (mEncoder == NULL) {
+            continue;
+        }
+
+        ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
+
+        mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, mReflector);
+        mEncoder->setCallback(mEncoderActivityNotify);
+
+        err = mEncoder->configure(
+                    mOutputFormat,
+                    NULL /* nativeWindow */,
+                    NULL /* crypto */,
+                    MediaCodec::CONFIGURE_FLAG_ENCODE);
+
+        if (err == OK) {
+            break;
+        }
+        mEncoder->release();
+        mEncoder = NULL;
     }
 
-    ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
-
-    mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, mReflector);
-    mEncoder->setCallback(mEncoderActivityNotify);
-
-    status_t err = mEncoder->configure(
-                mOutputFormat,
-                NULL /* nativeWindow */,
-                NULL /* crypto */,
-                MediaCodec::CONFIGURE_FLAG_ENCODE);
-
     if (err != OK) {
         return err;
     }
 
     mEncoder->getOutputFormat(&mOutputFormat);
-    convertMessageToMetaData(mOutputFormat, mMeta);
+    sp<MetaData> meta = new MetaData;
+    convertMessageToMetaData(mOutputFormat, meta);
+    mMeta.lock().set(meta);
 
     if (mFlags & FLAG_USE_SURFACE_INPUT) {
         CHECK(mIsVideo);
@@ -447,13 +519,19 @@
     sp<AMessage> inputFormat;
     int32_t usingSwReadOften;
     mSetEncoderFormat = false;
-    if (mEncoder->getInputFormat(&inputFormat) == OK
-            && inputFormat->findInt32("using-sw-read-often", &usingSwReadOften)
-            && usingSwReadOften) {
-        // this is a SW encoder; signal source to allocate SW readable buffers
+    if (mEncoder->getInputFormat(&inputFormat) == OK) {
         mSetEncoderFormat = true;
-        mEncoderFormat = kDefaultSwVideoEncoderFormat;
-        mEncoderDataSpace = kDefaultSwVideoEncoderDataSpace;
+        if (inputFormat->findInt32("using-sw-read-often", &usingSwReadOften)
+                && usingSwReadOften) {
+            // this is a SW encoder; signal source to allocate SW readable buffers
+            mEncoderFormat = kDefaultSwVideoEncoderFormat;
+        } else {
+            mEncoderFormat = kDefaultHwVideoEncoderFormat;
+        }
+        if (!inputFormat->findInt32("android._dataspace", &mEncoderDataSpace)) {
+            mEncoderDataSpace = kDefaultVideoEncoderDataSpace;
+        }
+        ALOGV("setting dataspace %#x, format %#x", mEncoderDataSpace, mEncoderFormat);
     }
 
     err = mEncoder->start();
@@ -462,8 +540,11 @@
         return err;
     }
 
-    mEncoderReachedEOS = false;
-    mErrorCode = OK;
+    {
+        Mutexed<Output>::Locked output(mOutput);
+        output->mEncoderReachedEOS = false;
+        output->mErrorCode = OK;
+    }
 
     return OK;
 }
@@ -475,14 +556,6 @@
 
     mEncoder->release();
     mEncoder.clear();
-
-    while (!mInputBufferQueue.empty()) {
-        MediaBuffer *mbuf = *mInputBufferQueue.begin();
-        mInputBufferQueue.erase(mInputBufferQueue.begin());
-        if (mbuf != NULL) {
-            mbuf->release();
-        }
-    }
 }
 
 status_t MediaCodecSource::postSynchronouslyAndReturnError(
@@ -502,25 +575,32 @@
 }
 
 void MediaCodecSource::signalEOS(status_t err) {
-    if (!mEncoderReachedEOS) {
-        ALOGV("encoder (%s) reached EOS", mIsVideo ? "video" : "audio");
-        {
-            Mutex::Autolock autoLock(mOutputBufferLock);
+    bool reachedEOS = false;
+    {
+        Mutexed<Output>::Locked output(mOutput);
+        reachedEOS = output->mEncoderReachedEOS;
+        if (!reachedEOS) {
+            ALOGV("encoder (%s) reached EOS", mIsVideo ? "video" : "audio");
             // release all unread media buffers
-            for (List<MediaBuffer*>::iterator it = mOutputBufferQueue.begin();
-                    it != mOutputBufferQueue.end(); it++) {
+            for (List<MediaBuffer*>::iterator it = output->mBufferQueue.begin();
+                    it != output->mBufferQueue.end(); it++) {
                 (*it)->release();
             }
-            mOutputBufferQueue.clear();
-            mEncoderReachedEOS = true;
-            mErrorCode = err;
-            mOutputBufferCond.signal();
-        }
+            output->mBufferQueue.clear();
+            output->mEncoderReachedEOS = true;
+            output->mErrorCode = err;
+            output->mCond.signal();
 
-        releaseEncoder();
+            reachedEOS = true;
+            output.unlock();
+            releaseEncoder();
+        }
     }
-    if (mStopping && mEncoderReachedEOS) {
+
+    if (mStopping && reachedEOS) {
         ALOGI("encoder (%s) stopped", mIsVideo ? "video" : "audio");
+        mPuller->stopSource();
+        ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
         // posting reply to everyone that's waiting
         List<sp<AReplyToken>>::iterator it;
         for (it = mStopReplyIDQueue.begin();
@@ -529,6 +609,7 @@
         }
         mStopReplyIDQueue.clear();
         mStopping = false;
+        ++mGeneration;
     }
 }
 
@@ -554,11 +635,8 @@
 }
 
 status_t MediaCodecSource::feedEncoderInputBuffers() {
-    while (!mInputBufferQueue.empty()
-            && !mAvailEncoderInputIndices.empty()) {
-        MediaBuffer* mbuf = *mInputBufferQueue.begin();
-        mInputBufferQueue.erase(mInputBufferQueue.begin());
-
+    MediaBuffer* mbuf = NULL;
+    while (!mAvailEncoderInputIndices.empty() && mPuller->readBuffer(&mbuf)) {
         size_t bufferIndex = *mAvailEncoderInputIndices.begin();
         mAvailEncoderInputIndices.erase(mAvailEncoderInputIndices.begin());
 
@@ -568,6 +646,7 @@
 
         if (mbuf != NULL) {
             CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+            timeUs += mInputBufferTimeOffsetUs;
 
             // push decoding time for video, or drift time for audio
             if (mIsVideo) {
@@ -629,6 +708,9 @@
 
     if (mStarted) {
         ALOGI("MediaCodecSource (%s) resuming", mIsVideo ? "video" : "audio");
+        if (mIsVideo) {
+            mEncoder->requestIDRFrame();
+        }
         if (mFlags & FLAG_USE_SURFACE_INPUT) {
             resume();
         } else {
@@ -676,30 +758,19 @@
     switch (msg->what()) {
     case kWhatPullerNotify:
     {
-        MediaBuffer *mbuf;
-        CHECK(msg->findPointer("accessUnit", (void**)&mbuf));
-
-        if (mbuf == NULL) {
-            ALOGV("puller (%s) reached EOS",
-                    mIsVideo ? "video" : "audio");
+        int32_t eos = 0;
+        if (msg->findInt32("eos", &eos) && eos) {
+            ALOGV("puller (%s) reached EOS", mIsVideo ? "video" : "audio");
             signalEOS();
-        }
-
-        if (mEncoder == NULL) {
-            ALOGV("got msg '%s' after encoder shutdown.",
-                  msg->debugString().c_str());
-
-            if (mbuf != NULL) {
-                mbuf->release();
-            }
-
             break;
         }
 
-        mInputBufferQueue.push_back(mbuf);
+        if (mEncoder == NULL) {
+            ALOGV("got msg '%s' after encoder shutdown.", msg->debugString().c_str());
+            break;
+        }
 
         feedEncoderInputBuffers();
-
         break;
     }
     case kWhatEncoderActivity:
@@ -716,6 +787,15 @@
 
             mAvailEncoderInputIndices.push_back(index);
             feedEncoderInputBuffers();
+        } else if (cbID == MediaCodec::CB_OUTPUT_FORMAT_CHANGED) {
+            status_t err = mEncoder->getOutputFormat(&mOutputFormat);
+            if (err != OK) {
+                signalEOS(err);
+                break;
+            }
+            sp<MetaData> meta = new MetaData;
+            convertMessageToMetaData(mOutputFormat, meta);
+            mMeta.lock().set(meta);
         } else if (cbID == MediaCodec::CB_OUTPUT_AVAILABLE) {
             int32_t index;
             size_t offset;
@@ -749,6 +829,9 @@
                 if (mIsVideo) {
                     int64_t decodingTimeUs;
                     if (mFlags & FLAG_USE_SURFACE_INPUT) {
+                        // Time offset is not applied at
+                        // feedEncoderInputBuffer() in surface input case.
+                        timeUs += mInputBufferTimeOffsetUs;
                         // GraphicBufferSource is supposed to discard samples
                         // queued before start, and offset timeUs by start time
                         CHECK_GE(timeUs, 0ll);
@@ -788,9 +871,9 @@
             mbuf->add_ref();
 
             {
-                Mutex::Autolock autoLock(mOutputBufferLock);
-                mOutputBufferQueue.push_back(mbuf);
-                mOutputBufferCond.signal();
+                Mutexed<Output>::Locked output(mOutput);
+                output->mBufferQueue.push_back(mbuf);
+                output->mCond.signal();
             }
 
             mEncoder->releaseOutputBuffer(index);
@@ -824,7 +907,7 @@
         sp<AReplyToken> replyID;
         CHECK(msg->senderAwaitsResponse(&replyID));
 
-        if (mEncoderReachedEOS) {
+        if (mOutput.lock()->mEncoderReachedEOS) {
             // if we already reached EOS, reply and return now
             ALOGI("encoder (%s) already stopped",
                     mIsVideo ? "video" : "audio");
@@ -842,17 +925,41 @@
         mStopping = true;
 
         // if using surface, signal source EOS and wait for EOS to come back.
-        // otherwise, release encoder and post EOS if haven't done already
+        // otherwise, stop puller (which also clears the input buffer queue)
+        // and wait for the EOS message. We cannot call source->stop() because
+        // the encoder may still be processing input buffers.
         if (mFlags & FLAG_USE_SURFACE_INPUT) {
             mEncoder->signalEndOfInputStream();
         } else {
-            signalEOS();
+            mPuller->stop();
         }
+
+        // complete stop even if encoder/puller stalled
+        sp<AMessage> timeoutMsg = new AMessage(kWhatStopStalled, mReflector);
+        timeoutMsg->setInt32("generation", mGeneration);
+        timeoutMsg->post(kStopTimeoutUs);
         break;
     }
+
+    case kWhatStopStalled:
+    {
+        int32_t generation;
+        CHECK(msg->findInt32("generation", &generation));
+        if (generation != mGeneration) {
+            break;
+        }
+
+        if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+            ALOGV("source (%s) stopping", mIsVideo ? "video" : "audio");
+            mPuller->interruptSource();
+            ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
+        }
+        signalEOS();
+        break;
+    }
+
     case kWhatPause:
     {
-        if (mFlags && FLAG_USE_SURFACE_INPUT) {
+        if (mFlags & FLAG_USE_SURFACE_INPUT) {
             suspend();
         } else {
             CHECK(mPuller != NULL);
@@ -860,6 +967,17 @@
         }
         break;
     }
+    case kWhatSetInputBufferTimeOffset:
+    {
+        sp<AReplyToken> replyID;
+        CHECK(msg->senderAwaitsResponse(&replyID));
+
+        CHECK(msg->findInt64("time-offset-us", &mInputBufferTimeOffsetUs));
+
+        sp<AMessage> response = new AMessage;
+        response->postReply(replyID);
+        break;
+    }
     default:
         TRESPASS();
     }
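
[editor's note] The Puller rewrite above replaces the kWhatPause/kWhatResume looper messages with a mutex-guarded Queue: the looper thread reads from the MediaSource and pushes into the queue, while feedEncoderInputBuffers() drains it through Puller::readBuffer(). A standalone sketch of that handoff, assuming std::mutex in place of the platform's Mutexed<> wrapper and a stand-in Buffer type (all names here are illustrative, not the patch's):

    #include <deque>
    #include <mutex>

    struct Buffer;  // stand-in for MediaBuffer

    class Puller {
    public:
        // producer side: the looper thread pushes each buffer read from the source
        void pushBuffer(Buffer *b) {
            std::lock_guard<std::mutex> lock(mLock);
            mBuffers.push_back(b);
        }

        // consumer side: drains one buffer per call; returns false
        // (and *b = nullptr) when the queue is empty
        bool readBuffer(Buffer **b) {
            std::lock_guard<std::mutex> lock(mLock);
            if (mBuffers.empty()) {
                *b = nullptr;
                return false;
            }
            *b = mBuffers.front();
            mBuffers.pop_front();
            return true;
        }

        // stop(): clear the pulling flag so the pull loop posts EOS
        // instead of rescheduling itself
        void stop() {
            std::lock_guard<std::mutex> lock(mLock);
            mPulling = false;
        }

    private:
        std::mutex mLock;
        std::deque<Buffer *> mBuffers;
        bool mPulling = true;
    };
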
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 2a50692..845462b 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -28,6 +28,7 @@
 const char *MEDIA_MIMETYPE_VIDEO_H263 = "video/3gpp";
 const char *MEDIA_MIMETYPE_VIDEO_MPEG2 = "video/mpeg2";
 const char *MEDIA_MIMETYPE_VIDEO_RAW = "video/raw";
+const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION = "video/dolby-vision";
 
 const char *MEDIA_MIMETYPE_AUDIO_AMR_NB = "audio/3gpp";
 const char *MEDIA_MIMETYPE_AUDIO_AMR_WB = "audio/amr-wb";
@@ -62,6 +63,7 @@
 const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
 const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
 const char *MEDIA_MIMETYPE_TEXT_CEA_608 = "text/cea-608";
+const char *MEDIA_MIMETYPE_TEXT_CEA_708 = "text/cea-708";
 const char *MEDIA_MIMETYPE_DATA_TIMED_ID3 = "application/x-id3v4";
 
 }  // namespace android
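
[editor's note] These constants are plain MIME strings that downstream code matches on; for example, the MediaCodecList lookup added to MediaCodecSource earlier in this patch takes such a string. A hedged sketch using the new Dolby Vision type (the constant and the findMatchingCodecs signature are from this patch; the helper itself is illustrative):

    #include <media/stagefright/MediaCodecList.h>
    #include <media/stagefright/MediaDefs.h>

    // Sketch: probe for a decoder supporting the newly added MIME type.
    static bool hasDolbyVisionDecoder() {
        Vector<AString> matchingCodecs;
        MediaCodecList::findMatchingCodecs(
                MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, false /* encoder */,
                0 /* flags */, &matchingCodecs);
        return !matchingCodecs.isEmpty();
    }
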
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index e21fe6e..92ce88c 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -17,6 +17,8 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaExtractor"
 #include <utils/Log.h>
+#include <inttypes.h>
+#include <pwd.h>
 
 #include "include/AMRExtractor.h"
 #include "include/MP3Extractor.h"
@@ -33,15 +35,34 @@
 
 #include "matroska/MatroskaExtractor.h"
 
+#include <binder/IServiceManager.h>
+#include <binder/MemoryDealer.h>
+
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MetaData.h>
+#include <media/IMediaExtractorService.h>
+#include <cutils/properties.h>
 #include <utils/String8.h>
+#include <private/android_filesystem_config.h>
 
 namespace android {
 
+MediaExtractor::MediaExtractor():
+    mIsDrm(false) {
+    if (!LOG_NDEBUG) {
+        uid_t uid = getuid();
+        struct passwd *pw = getpwuid(uid);
+        // getpwuid() may return NULL for an unknown uid; don't dereference blindly
+        ALOGI("extractor created in uid: %d (%s)", uid, pw != NULL ? pw->pw_name : "unknown");
+    }
+}
+
 sp<MetaData> MediaExtractor::getMetaData() {
     return new MetaData;
 }
@@ -50,9 +71,129 @@
     return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_PAUSE | CAN_SEEK;
 }
 
+
+class RemoteDataSource : public BnDataSource {
+public:
+    enum {
+        kBufferSize = 64 * 1024,
+    };
+
+    static sp<IDataSource> wrap(const sp<DataSource> &source);
+    virtual ~RemoteDataSource();
+
+    virtual sp<IMemory> getIMemory();
+    virtual ssize_t readAt(off64_t offset, size_t size);
+    virtual status_t getSize(off64_t* size);
+    virtual void close();
+    virtual uint32_t getFlags();
+    virtual String8 toString();
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime);
+
+private:
+    sp<IMemory> mMemory;
+    sp<DataSource> mSource;
+    String8 mName;
+    RemoteDataSource(const sp<DataSource> &source);
+    DISALLOW_EVIL_CONSTRUCTORS(RemoteDataSource);
+};
+
+sp<IDataSource> RemoteDataSource::wrap(const sp<DataSource> &source) {
+    return new RemoteDataSource(source);
+}
+
+RemoteDataSource::RemoteDataSource(const sp<DataSource> &source) {
+    mSource = source;
+    sp<MemoryDealer> memoryDealer = new MemoryDealer(kBufferSize, "RemoteDataSource");
+    mMemory = memoryDealer->allocate(kBufferSize);
+    if (mMemory == NULL) {
+        ALOGE("Failed to allocate memory!");
+    }
+    mName = String8::format("RemoteDataSource(%s)", mSource->toString().string());
+}
+
+RemoteDataSource::~RemoteDataSource() {
+    close();
+}
+
+sp<IMemory> RemoteDataSource::getIMemory() {
+    return mMemory;
+}
+
+ssize_t RemoteDataSource::readAt(off64_t offset, size_t size) {
+    ALOGV("readAt(%" PRId64 ", %zu)", offset, size);
+    if (size > kBufferSize) {
+        // never write past the end of the shared memory window
+        size = kBufferSize;
+    }
+    return mSource->readAt(offset, mMemory->pointer(), size);
+}
+status_t RemoteDataSource::getSize(off64_t* size) {
+    return mSource->getSize(size);
+}
+void RemoteDataSource::close() {
+    mSource = NULL;
+}
+uint32_t RemoteDataSource::getFlags() {
+    return mSource->flags();
+}
+
+String8 RemoteDataSource::toString() {
+    return mName;
+}
+
+sp<DecryptHandle> RemoteDataSource::DrmInitialization(const char *mime) {
+    return mSource->DrmInitialization(mime);
+}
+
 // static
-sp<MediaExtractor> MediaExtractor::Create(
+sp<IMediaExtractor> MediaExtractor::Create(
         const sp<DataSource> &source, const char *mime) {
+    ALOGV("MediaExtractor::Create %s", mime);
+
+    char value[PROPERTY_VALUE_MAX];
+    if (property_get("media.stagefright.extractremote", value, NULL)
+            && (!strcmp("0", value) || !strcasecmp("false", value))) {
+        // local extractor
+        ALOGW("creating media extractor in calling process");
+        return CreateFromService(source, mime);
+    } else {
+        // Check if it's WVM, since WVMExtractor needs to be created in the media server process,
+        // not the extractor process.
+        String8 mime8;
+        float confidence;
+        sp<AMessage> meta;
+        if (SniffWVM(source, &mime8, &confidence, &meta) &&
+                !strcasecmp(mime8, MEDIA_MIMETYPE_CONTAINER_WVM)) {
+            return new WVMExtractor(source);
+        }
+
+        // Check if it's es-based DRM, since DRMExtractor needs to be created in the media server
+        // process, not the extractor process.
+        if (SniffDRM(source, &mime8, &confidence, &meta)) {
+            const char *drmMime = mime8.string();
+            ALOGV("Detected media content as '%s' with confidence %.2f", drmMime, confidence);
+            if (!strncmp(drmMime, "drm+es_based+", 13)) {
+                // DRMExtractor sets container metadata kKeyIsDRM to 1
+                return new DRMExtractor(source, drmMime + 13 /* strlen("drm+es_based+") */);
+            }
+        }
+
+        // remote extractor
+        ALOGV("get service manager");
+        sp<IBinder> binder = defaultServiceManager()->getService(String16("media.extractor"));
+
+        if (binder != 0) {
+            sp<IMediaExtractorService> mediaExService(interface_cast<IMediaExtractorService>(binder));
+            sp<IMediaExtractor> ex = mediaExService->makeExtractor(RemoteDataSource::wrap(source), mime);
+            return ex;
+        } else {
+            ALOGE("extractor service not running");
+            return NULL;
+        }
+    }
+    return NULL;
+}
+
+sp<MediaExtractor> MediaExtractor::CreateFromService(
+        const sp<DataSource> &source, const char *mime) {
+
+    ALOGV("MediaExtractor::CreateFromService %s", mime);
+    DataSource::RegisterDefaultSniffers();
+
     sp<AMessage> meta;
 
     String8 tmp;
@@ -110,7 +251,7 @@
         ret = new MatroskaExtractor(source);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
         ret = new MPEG2TSExtractor(source);
-    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WVM)) {
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WVM) && getuid() == AID_MEDIA) {
         // Return now.  WVExtractor should not have the DrmFlag set in the block below.
         return new WVMExtractor(source);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC_ADTS)) {
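
[editor's note] RemoteDataSource above bridges a local DataSource across binder by pairing a 64 KiB shared-memory window (getIMemory()) with readAt(offset, size): each call writes into the window and returns the byte count, and the remote side copies the bytes out before the next call. A hedged client-side sketch of one round trip; 'localSource' and 'dst' are placeholders, and in this patch the only real consumer is the media.extractor service:

    sp<IDataSource> remote = RemoteDataSource::wrap(localSource);
    sp<IMemory> window = remote->getIMemory();

    ssize_t n = remote->readAt(0 /* offset */, 4096 /* size */);
    if (n > 0) {
        // readAt() wrote the bytes into the shared window, not a caller buffer
        memcpy(dst, window->pointer(), n);
    }
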
diff --git a/media/libstagefright/MediaSource.cpp b/media/libstagefright/MediaSource.cpp
index 576471a..a17757a 100644
--- a/media/libstagefright/MediaSource.cpp
+++ b/media/libstagefright/MediaSource.cpp
@@ -22,56 +22,4 @@
 
 MediaSource::~MediaSource() {}
 
-////////////////////////////////////////////////////////////////////////////////
-
-MediaSource::ReadOptions::ReadOptions() {
-    reset();
-}
-
-void MediaSource::ReadOptions::reset() {
-    mOptions = 0;
-    mSeekTimeUs = 0;
-    mLatenessUs = 0;
-    mNonBlocking = false;
-}
-
-void MediaSource::ReadOptions::setNonBlocking() {
-    mNonBlocking = true;
-}
-
-void MediaSource::ReadOptions::clearNonBlocking() {
-    mNonBlocking = false;
-}
-
-bool MediaSource::ReadOptions::getNonBlocking() const {
-    return mNonBlocking;
-}
-
-void MediaSource::ReadOptions::setSeekTo(int64_t time_us, SeekMode mode) {
-    mOptions |= kSeekTo_Option;
-    mSeekTimeUs = time_us;
-    mSeekMode = mode;
-}
-
-void MediaSource::ReadOptions::clearSeekTo() {
-    mOptions &= ~kSeekTo_Option;
-    mSeekTimeUs = 0;
-    mSeekMode = SEEK_CLOSEST_SYNC;
-}
-
-bool MediaSource::ReadOptions::getSeekTo(
-        int64_t *time_us, SeekMode *mode) const {
-    *time_us = mSeekTimeUs;
-    *mode = mSeekMode;
-    return (mOptions & kSeekTo_Option) != 0;
-}
-
-void MediaSource::ReadOptions::setLateBy(int64_t lateness_us) {
-    mLatenessUs = lateness_us;
-}
-
-int64_t MediaSource::ReadOptions::getLateBy() const {
-    return mLatenessUs;
-}
-
 }  // namespace android
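
[editor's note] The ReadOptions accessors deleted here are trivial getters/setters (the patch does not show where they moved; presumably inline in a header). For reference, the contract they implement, as a hedged usage sketch built only from the removed code ('source' is a placeholder sp<MediaSource>):

    MediaSource::ReadOptions options;
    options.setSeekTo(5000000ll /* 5 s */, MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);

    int64_t seekTimeUs;
    MediaSource::ReadOptions::SeekMode mode;
    if (options.getSeekTo(&seekTimeUs, &mode)) {
        // a seek was requested; honor it before returning the next buffer
    }

    MediaBuffer *buffer = NULL;
    status_t err = source->read(&buffer, &options);
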
diff --git a/media/libstagefright/MediaSync.cpp b/media/libstagefright/MediaSync.cpp
index 3a45e25..6f2d868 100644
--- a/media/libstagefright/MediaSync.cpp
+++ b/media/libstagefright/MediaSync.cpp
@@ -635,7 +635,7 @@
 
     ALOGV("acquired buffer %#llx from input", (long long)bufferItem.mGraphicBuffer->getId());
 
-    status = mInput->detachBuffer(bufferItem.mBuf);
+    status = mInput->detachBuffer(bufferItem.mSlot);
     if (status != NO_ERROR) {
         ALOGE("detaching buffer from input failed (%d)", status);
         if (status == NO_INIT) {
@@ -677,7 +677,6 @@
             bufferItem.mCrop,
             static_cast<int32_t>(bufferItem.mScalingMode),
             bufferItem.mTransform,
-            bufferItem.mIsDroppable,
             bufferItem.mFence);
 
     // Attach and queue the buffer to the output.
diff --git a/media/libstagefright/MidiExtractor.cpp b/media/libstagefright/MidiExtractor.cpp
index f6b8c84f..7930bbb 100644
--- a/media/libstagefright/MidiExtractor.cpp
+++ b/media/libstagefright/MidiExtractor.cpp
@@ -178,6 +178,7 @@
         mEasConfig = EAS_Config();
         trackMetadata->setInt32(kKeySampleRate, mEasConfig->sampleRate);
         trackMetadata->setInt32(kKeyChannelCount, mEasConfig->numChannels);
+        trackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
     }
     mIsInitialized = true;
 }
@@ -281,7 +282,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-sp<MediaSource> MidiExtractor::getTrack(size_t index)
+sp<IMediaSource> MidiExtractor::getTrack(size_t index)
 {
     if (mInitCheck != OK || index > 0) {
         return NULL;
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index d6255d6..453db03 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -224,6 +224,8 @@
     // So whenever we call DataSource::readAt it may end up in a call to
     // IMediaHTTPConnection::readAt and therefore call back into JAVA.
     mLooper->start(false /* runOnCallingThread */, true /* canCallJava */);
+
+    mName = String8::format("NuCachedSource2(%s)", mSource->toString().string());
 }
 
 NuCachedSource2::~NuCachedSource2() {
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index f24cf3a..a669dca 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -55,6 +55,9 @@
     }
 
     mSelectedTracks.clear();
+    if (mDataSource != NULL) {
+        mDataSource->close();
+    }
 }
 
 status_t NuMediaExtractor::setDataSource(
@@ -110,7 +113,8 @@
         // at the container mime type.
         // The cryptoPluginMode ensures that the extractor will actually
         // give us data in a call to MediaSource::read(), unlike its
-        // default mode that we use from AwesomePlayer.
+        // default mode that we used in AwesomePlayer.
+        // TODO: change default mode
         static_cast<WVMExtractor *>(mImpl.get())->setCryptoPluginMode(true);
     } else if (mImpl->getDrmFlag()) {
         // For all other drm content, we don't want to expose decrypted
@@ -120,15 +124,19 @@
         return ERROR_UNSUPPORTED;
     }
 
-    mDataSource = dataSource;
-
-    updateDurationAndBitrate();
+    status_t err = updateDurationAndBitrate();
+    if (err == OK) {
+        mDataSource = dataSource;
+    }
 
-    return OK;
+    return err;
 }
 
 status_t NuMediaExtractor::setDataSource(int fd, off64_t offset, off64_t size) {
 
+    ALOGV("setDataSource fd=%d (%s), offset=%lld, length=%lld",
+            fd, nameForFd(fd).c_str(), (long long) offset, (long long) size);
+
     Mutex::Autolock autoLock(mLock);
 
     if (mImpl != NULL) {
@@ -148,9 +156,10 @@
         return ERROR_UNSUPPORTED;
     }
 
-    mDataSource = fileSource;
-
-    updateDurationAndBitrate();
+    err = updateDurationAndBitrate();
+    if (err == OK) {
+        mDataSource = fileSource;
+    }
 
-    return OK;
+    return err;
 }
@@ -173,19 +182,28 @@
         return ERROR_UNSUPPORTED;
     }
 
-    mDataSource = source;
+    err = updateDurationAndBitrate();
+    if (err == OK) {
+        mDataSource = source;
+    }
 
-    updateDurationAndBitrate();
-
-    return OK;
+    return err;
 }
 
-void NuMediaExtractor::updateDurationAndBitrate() {
+status_t NuMediaExtractor::updateDurationAndBitrate() {
+    if (mImpl->countTracks() > kMaxTrackCount) {
+        return ERROR_UNSUPPORTED;
+    }
+
     mTotalBitrate = 0ll;
     mDurationUs = -1ll;
 
     for (size_t i = 0; i < mImpl->countTracks(); ++i) {
         sp<MetaData> meta = mImpl->getTrackMetaData(i);
+        if (meta == NULL) {
+            ALOGW("no metadata for track %zu", i);
+            continue;
+        }
 
         int32_t bitrate;
         if (!meta->findInt32(kKeyBitRate, &bitrate)) {
@@ -204,6 +222,7 @@
             mDurationUs = durationUs;
         }
     }
+    return OK;
 }
 
 size_t NuMediaExtractor::countTracks() const {
@@ -213,7 +232,7 @@
 }
 
 status_t NuMediaExtractor::getTrackFormat(
-        size_t index, sp<AMessage> *format) const {
+        size_t index, sp<AMessage> *format, uint32_t flags) const {
     Mutex::Autolock autoLock(mLock);
 
     *format = NULL;
@@ -226,7 +245,13 @@
         return -ERANGE;
     }
 
-    sp<MetaData> meta = mImpl->getTrackMetaData(index);
+    sp<MetaData> meta = mImpl->getTrackMetaData(index, flags);
+    // Extractors either support track IDs or they don't, so either all tracks
+    // have track IDs or none do. Generate one from the track index if missing.
+    int32_t trackID;
+    if (meta != NULL && !meta->findInt32(kKeyTrackID, &trackID)) {
+        meta->setInt32(kKeyTrackID, (int32_t)index + 1);
+    }
     return convertMetaDataToMessage(meta, format);
 }
 
@@ -278,7 +303,7 @@
         }
     }
 
-    sp<MediaSource> source = mImpl->getTrack(index);
+    sp<IMediaSource> source = mImpl->getTrack(index);
 
     CHECK_EQ((status_t)OK, source->start());
 
@@ -442,6 +467,59 @@
     return OK;
 }
 
+status_t NuMediaExtractor::appendVorbisNumPageSamples(TrackInfo *info, const sp<ABuffer> &buffer) {
+    int32_t numPageSamples;
+    if (!info->mSample->meta_data()->findInt32(
+            kKeyValidSamples, &numPageSamples)) {
+        numPageSamples = -1;
+    }
+
+    memcpy((uint8_t *)buffer->data() + info->mSample->range_length(),
+           &numPageSamples,
+           sizeof(numPageSamples));
+
+    uint32_t type;
+    const void *data;
+    size_t size, size2;
+    if (info->mSample->meta_data()->findData(kKeyEncryptedSizes, &type, &data, &size)) {
+        // Signal numPageSamples (a plain int32_t) is appended at the end,
+        // i.e. sizeof(numPageSamples) plain bytes + 0 encrypted bytes
+        if (SIZE_MAX - size < sizeof(int32_t)) {
+            return -ENOMEM;
+        }
+
+        size_t newSize = size + sizeof(int32_t);
+        sp<ABuffer> abuf = new ABuffer(newSize);
+        uint8_t *adata = static_cast<uint8_t *>(abuf->data());
+        if (adata == NULL) {
+            return -ENOMEM;
+        }
+
+        // append 0 to encrypted sizes
+        int32_t zero = 0;
+        memcpy(adata, data, size);
+        memcpy(adata + size, &zero, sizeof(zero));
+        info->mSample->meta_data()->setData(kKeyEncryptedSizes, type, adata, newSize);
+
+        if (info->mSample->meta_data()->findData(kKeyPlainSizes, &type, &data, &size2)) {
+            if (size2 != size) {
+                return ERROR_MALFORMED;
+            }
+            memcpy(adata, data, size);
+        } else {
+            // if sample meta data does not include plain size array, assume filled with zeros,
+            // i.e. entire buffer is encrypted
+            memset(adata, 0, size);
+        }
+        // append sizeof(numPageSamples) to plain sizes.
+        int32_t int32Size = sizeof(numPageSamples);
+        memcpy(adata + size, &int32Size, sizeof(int32Size));
+        info->mSample->meta_data()->setData(kKeyPlainSizes, type, adata, newSize);
+    }
+
+    return OK;
+}
+
 status_t NuMediaExtractor::readSampleData(const sp<ABuffer> &buffer) {
     Mutex::Autolock autoLock(mLock);
 
@@ -471,21 +549,16 @@
 
     memcpy((uint8_t *)buffer->data(), src, info->mSample->range_length());
 
+    status_t err = OK;
     if (info->mTrackFlags & kIsVorbis) {
-        int32_t numPageSamples;
-        if (!info->mSample->meta_data()->findInt32(
-                    kKeyValidSamples, &numPageSamples)) {
-            numPageSamples = -1;
-        }
-
-        memcpy((uint8_t *)buffer->data() + info->mSample->range_length(),
-               &numPageSamples,
-               sizeof(numPageSamples));
+        err = appendVorbisNumPageSamples(info, buffer);
     }
 
-    buffer->setRange(0, sampleSize);
+    if (err == OK) {
+        buffer->setRange(0, sampleSize);
+    }
 
-    return OK;
+    return err;
 }
 
 status_t NuMediaExtractor::getSampleTrackIndex(size_t *trackIndex) {
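
[editor's note] appendVorbisNumPageSamples() above factors the Vorbis-specific trailer handling out of readSampleData(). Schematically (a restatement of the code above, not new behavior):

    // Buffer layout returned for a Vorbis sample:
    //
    //   [0 .. range_length()-1]                  sample payload
    //   [range_length() .. +sizeof(int32_t)-1]   numPageSamples (-1 if unknown)
    //
    // For encrypted samples both size arrays grow by one trailing entry,
    // so the trailer is accounted for as plaintext:
    //   kKeyEncryptedSizes: ..., 0
    //   kKeyPlainSizes:     ..., sizeof(int32_t)
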
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index e69890d..e994069 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -25,19 +25,29 @@
 
 #include <binder/IServiceManager.h>
 #include <media/IMediaPlayerService.h>
+#include <media/IMediaCodecService.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/OMXClient.h>
+#include <cutils/properties.h>
 #include <utils/KeyedVector.h>
 
 #include "include/OMX.h"
 
 namespace android {
 
+static bool sCodecProcessEnabled = true;
+
 struct MuxOMX : public IOMX {
-    MuxOMX(const sp<IOMX> &remoteOMX);
+    MuxOMX(const sp<IOMX> &mediaServerOMX, const sp<IOMX> &mediaCodecOMX);
     virtual ~MuxOMX();
 
-    virtual IBinder *onAsBinder() { return IInterface::asBinder(mRemoteOMX).get(); }
+    // Nobody should be calling this. In case someone does anyway, just
+    // return the media server IOMX.
+    // TODO: return NULL
+    virtual IBinder *onAsBinder() {
+        ALOGE("MuxOMX::onAsBinder should not be called");
+        return IInterface::asBinder(mMediaServerOMX).get();
+    }
 
     virtual bool livesLocally(node_id node, pid_t pid);
 
@@ -45,6 +55,7 @@
 
     virtual status_t allocateNode(
             const char *name, const sp<IOMXObserver> &observer,
+            sp<IBinder> *nodeBinder,
             node_id *node);
 
     virtual status_t freeNode(node_id node);
@@ -82,8 +93,8 @@
             node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
             OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
 
-    virtual status_t enableGraphicBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable);
+    virtual status_t enableNativeBuffers(
+            node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable);
 
     virtual status_t getGraphicBufferUsage(
             node_id node, OMX_U32 port_index, OMX_U32* usage);
@@ -100,8 +111,12 @@
             node_id node, OMX_U32 port_index,
             const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
 
-    virtual status_t createInputSurface(
+    virtual status_t updateNativeHandleInMeta(
             node_id node, OMX_U32 port_index,
+            const sp<NativeHandle> &nativeHandle, buffer_id buffer);
+
+    virtual status_t createInputSurface(
+            node_id node, OMX_U32 port_index, android_dataspace dataSpace,
             sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type);
 
     virtual status_t createPersistentInputSurface(
@@ -114,9 +129,9 @@
 
     virtual status_t signalEndOfInputStream(node_id node);
 
-    virtual status_t allocateBuffer(
+    virtual status_t allocateSecureBuffer(
             node_id node, OMX_U32 port_index, size_t size,
-            buffer_id *buffer, void **buffer_data);
+            buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle);
 
     virtual status_t allocateBufferWithBackup(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
@@ -148,23 +163,32 @@
 private:
     mutable Mutex mLock;
 
-    sp<IOMX> mRemoteOMX;
+    sp<IOMX> mMediaServerOMX;
+    sp<IOMX> mMediaCodecOMX;
     sp<IOMX> mLocalOMX;
 
-    KeyedVector<node_id, bool> mIsLocalNode;
+    typedef enum {
+        LOCAL,
+        MEDIAPROCESS,
+        CODECPROCESS
+    } node_location;
+
+    KeyedVector<node_id, node_location> mNodeLocation;
 
     bool isLocalNode(node_id node) const;
     bool isLocalNode_l(node_id node) const;
     const sp<IOMX> &getOMX(node_id node) const;
     const sp<IOMX> &getOMX_l(node_id node) const;
 
-    static bool CanLiveLocally(const char *name);
+    static node_location getPreferredCodecLocation(const char *name);
 
     DISALLOW_EVIL_CONSTRUCTORS(MuxOMX);
 };
 
-MuxOMX::MuxOMX(const sp<IOMX> &remoteOMX)
-    : mRemoteOMX(remoteOMX) {
+MuxOMX::MuxOMX(const sp<IOMX> &mediaServerOMX, const sp<IOMX> &mediaCodecOMX)
+    : mMediaServerOMX(mediaServerOMX),
+      mMediaCodecOMX(mediaCodecOMX) {
+    ALOGI("MuxOMX ctor");
 }
 
 MuxOMX::~MuxOMX() {
@@ -177,27 +201,55 @@
 }
 
 bool MuxOMX::isLocalNode_l(node_id node) const {
-    return mIsLocalNode.indexOfKey(node) >= 0;
+    return mNodeLocation.valueFor(node) == LOCAL;
 }
 
 // static
-bool MuxOMX::CanLiveLocally(const char *name) {
+MuxOMX::node_location MuxOMX::getPreferredCodecLocation(const char *name) {
+    if (sCodecProcessEnabled) {
+        // all codecs go to codec process unless excluded using system property, in which case
+        // all non-secure decoders, OMX.google.* codecs and encoders can go in the codec process
+        // (non-OMX.google.* encoders can be excluded using system property.)
+        if ((strcasestr(name, "decoder")
+                        && strcasestr(name, ".secure") != name + strlen(name) - 7)
+                || (strcasestr(name, "encoder")
+                        && !property_get_bool("media.stagefright.legacyencoder", false))
+                || !property_get_bool("media.stagefright.less-secure", false)
+                || !strncasecmp(name, "OMX.google.", 11)) {
+            return CODECPROCESS;
+        }
+        // everything else runs in the media server
+        return MEDIAPROCESS;
+    } else {
 #ifdef __LP64__
-    (void)name; // disable unused parameter warning
-    // 64 bit processes always run OMX remote on MediaServer
-    return false;
+        // 64 bit processes always run OMX remote on MediaServer
+        return MEDIAPROCESS;
 #else
-    // 32 bit processes run only OMX.google.* components locally
-    return !strncasecmp(name, "OMX.google.", 11);
+        // 32 bit processes run only OMX.google.* components locally
+        if (!strncasecmp(name, "OMX.google.", 11)) {
+            return LOCAL;
+        }
+        return MEDIAPROCESS;
 #endif
+    }
 }
 
 const sp<IOMX> &MuxOMX::getOMX(node_id node) const {
-    return isLocalNode(node) ? mLocalOMX : mRemoteOMX;
+    Mutex::Autolock autoLock(mLock);
+    return getOMX_l(node);
 }
 
 const sp<IOMX> &MuxOMX::getOMX_l(node_id node) const {
-    return isLocalNode_l(node) ? mLocalOMX : mRemoteOMX;
+    node_location loc = mNodeLocation.valueFor(node);
+    if (loc == LOCAL) {
+        return mLocalOMX;
+    } else if (loc == MEDIAPROCESS) {
+        return mMediaServerOMX;
+    } else if (loc == CODECPROCESS) {
+        return mMediaCodecOMX;
+    }
+    ALOGE("Couldn't determine node location for node %d: %d, using local", node, loc);
+    return mLocalOMX;
 }
 
 bool MuxOMX::livesLocally(node_id node, pid_t pid) {
@@ -216,29 +268,34 @@
 
 status_t MuxOMX::allocateNode(
         const char *name, const sp<IOMXObserver> &observer,
+        sp<IBinder> *nodeBinder,
         node_id *node) {
     Mutex::Autolock autoLock(mLock);
 
     sp<IOMX> omx;
 
-    if (CanLiveLocally(name)) {
+    node_location loc = getPreferredCodecLocation(name);
+    if (loc == CODECPROCESS) {
+        omx = mMediaCodecOMX;
+    } else if (loc == MEDIAPROCESS) {
+        omx = mMediaServerOMX;
+    } else {
         if (mLocalOMX == NULL) {
             mLocalOMX = new OMX;
         }
         omx = mLocalOMX;
-    } else {
-        omx = mRemoteOMX;
     }
 
-    status_t err = omx->allocateNode(name, observer, node);
+    status_t err = omx->allocateNode(name, observer, nodeBinder, node);
+    ALOGV("allocated node_id %x on %s OMX", *node, omx == mMediaCodecOMX ? "codecprocess" :
+            omx == mMediaServerOMX ? "mediaserver" : "local");
 
     if (err != OK) {
         return err;
     }
 
-    if (omx == mLocalOMX) {
-        mIsLocalNode.add(*node, true);
-    }
+    mNodeLocation.add(*node, loc);
 
     return OK;
 }
@@ -252,7 +309,7 @@
         return err;
     }
 
-    mIsLocalNode.removeItem(node);
+    mNodeLocation.removeItem(node);
 
     return OK;
 }
@@ -310,9 +367,9 @@
             node, portIndex, enable, audioHwSync, sidebandHandle);
 }
 
-status_t MuxOMX::enableGraphicBuffers(
-        node_id node, OMX_U32 port_index, OMX_BOOL enable) {
-    return getOMX(node)->enableGraphicBuffers(node, port_index, enable);
+status_t MuxOMX::enableNativeBuffers(
+        node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
+    return getOMX(node)->enableNativeBuffers(node, port_index, graphic, enable);
 }
 
 status_t MuxOMX::getGraphicBufferUsage(
@@ -340,19 +397,34 @@
             node, port_index, graphicBuffer, buffer);
 }
 
-status_t MuxOMX::createInputSurface(
+status_t MuxOMX::updateNativeHandleInMeta(
         node_id node, OMX_U32 port_index,
+        const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
+    return getOMX(node)->updateNativeHandleInMeta(
+            node, port_index, nativeHandle, buffer);
+}
+
+status_t MuxOMX::createInputSurface(
+        node_id node, OMX_U32 port_index, android_dataspace dataSpace,
         sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
     status_t err = getOMX(node)->createInputSurface(
-            node, port_index, bufferProducer, type);
+            node, port_index, dataSpace, bufferProducer, type);
     return err;
 }
 
 status_t MuxOMX::createPersistentInputSurface(
         sp<IGraphicBufferProducer> *bufferProducer,
         sp<IGraphicBufferConsumer> *bufferConsumer) {
-    // TODO: local or remote? Always use remote for now
-    return mRemoteOMX->createPersistentInputSurface(
+    sp<IOMX> omx;
+    {
+        Mutex::Autolock autoLock(mLock);
+        if (property_get_bool("media.stagefright.legacyencoder", false)) {
+            omx = mMediaServerOMX;
+        } else {
+            omx = mMediaCodecOMX;
+        }
+    }
+    return omx->createPersistentInputSurface(
             bufferProducer, bufferConsumer);
 }
 
@@ -366,11 +438,11 @@
     return getOMX(node)->signalEndOfInputStream(node);
 }
 
-status_t MuxOMX::allocateBuffer(
+status_t MuxOMX::allocateSecureBuffer(
         node_id node, OMX_U32 port_index, size_t size,
-        buffer_id *buffer, void **buffer_data) {
-    return getOMX(node)->allocateBuffer(
-            node, port_index, size, buffer, buffer_data);
+        buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
+    return getOMX(node)->allocateSecureBuffer(
+            node, port_index, size, buffer, buffer_data, native_handle);
 }
 
 status_t MuxOMX::allocateBufferWithBackup(
@@ -415,29 +487,53 @@
 }
 
 OMXClient::OMXClient() {
+    char value[PROPERTY_VALUE_MAX];
+    if (property_get("media.stagefright.codecremote", value, NULL)
+            && (!strcmp("0", value) || !strcasecmp("false", value))) {
+        sCodecProcessEnabled = false;
+    }
 }
 
 status_t OMXClient::connect() {
     sp<IServiceManager> sm = defaultServiceManager();
-    sp<IBinder> binder = sm->getService(String16("media.player"));
-    sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
+    sp<IBinder> playerbinder = sm->getService(String16("media.player"));
+    sp<IMediaPlayerService> mediaservice = interface_cast<IMediaPlayerService>(playerbinder);
 
-    if (service.get() == NULL) {
+    if (mediaservice.get() == NULL) {
         ALOGE("Cannot obtain IMediaPlayerService");
         return NO_INIT;
     }
 
-    mOMX = service->getOMX();
-    if (mOMX.get() == NULL) {
-        ALOGE("Cannot obtain IOMX");
+    sp<IOMX> mediaServerOMX = mediaservice->getOMX();
+    if (mediaServerOMX.get() == NULL) {
+        ALOGE("Cannot obtain mediaserver IOMX");
         return NO_INIT;
     }
 
-    if (!mOMX->livesLocally(0 /* node */, getpid())) {
-        ALOGI("Using client-side OMX mux.");
-        mOMX = new MuxOMX(mOMX);
+    // If we don't want to use the codec process, and the media server OMX
+    // is local, use it directly instead of going through MuxOMX
+    if (!sCodecProcessEnabled &&
+            mediaServerOMX->livesLocally(0 /* node */, getpid())) {
+        mOMX = mediaServerOMX;
+        return OK;
     }
 
+    sp<IBinder> codecbinder = sm->getService(String16("media.codec"));
+    sp<IMediaCodecService> codecservice = interface_cast<IMediaCodecService>(codecbinder);
+
+    if (codecservice.get() == NULL) {
+        ALOGE("Cannot obtain IMediaCodecService");
+        return NO_INIT;
+    }
+
+    sp<IOMX> mediaCodecOMX = codecservice->getOMX();
+    if (mediaCodecOMX.get() == NULL) {
+        ALOGE("Cannot obtain mediacodec IOMX");
+        return NO_INIT;
+    }
+
+    mOMX = new MuxOMX(mediaServerOMX, mediaCodecOMX);
+
     return OK;
 }
 
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
deleted file mode 100644
index 7e15e18..0000000
--- a/media/libstagefright/OMXCodec.cpp
+++ /dev/null
@@ -1,4412 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <inttypes.h>
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "OMXCodec"
-
-#ifdef __LP64__
-#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
-#endif
-
-#include <utils/Log.h>
-
-#include "include/AACEncoder.h"
-
-#include "include/ESDS.h"
-
-#include <binder/IServiceManager.h>
-#include <binder/MemoryDealer.h>
-#include <binder/ProcessState.h>
-#include <HardwareAPI.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/IMediaPlayerService.h>
-#include <media/stagefright/ACodec.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaCodecList.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXCodec.h>
-#include <media/stagefright/SurfaceUtils.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/SkipCutBuffer.h>
-#include <utils/Vector.h>
-
-#include <OMX_AudioExt.h>
-#include <OMX_Component.h>
-#include <OMX_IndexExt.h>
-#include <OMX_VideoExt.h>
-#include <OMX_AsString.h>
-
-#include "include/avc_utils.h"
-
-namespace android {
-
-// Treat time out as an error if we have not received any output
-// buffers after 3 seconds.
-const static int64_t kBufferFilledEventTimeOutNs = 3000000000LL;
-
-// OMX Spec defines less than 50 color formats. If the query for
-// color format is executed for more than kMaxColorFormatSupported,
-// the query will fail to avoid looping forever.
-// 1000 is more than enough for us to tell whether the omx
-// component in question is buggy or not.
-const static uint32_t kMaxColorFormatSupported = 1000;
-
-#define FACTORY_CREATE_ENCODER(name) \
-static sp<MediaSource> Make##name(const sp<MediaSource> &source, const sp<MetaData> &meta) { \
-    return new name(source, meta); \
-}
-
-#define FACTORY_REF(name) { #name, Make##name },
-
-FACTORY_CREATE_ENCODER(AACEncoder)
-
-static sp<MediaSource> InstantiateSoftwareEncoder(
-        const char *name, const sp<MediaSource> &source,
-        const sp<MetaData> &meta) {
-    struct FactoryInfo {
-        const char *name;
-        sp<MediaSource> (*CreateFunc)(const sp<MediaSource> &, const sp<MetaData> &);
-    };
-
-    static const FactoryInfo kFactoryInfo[] = {
-        FACTORY_REF(AACEncoder)
-    };
-    for (size_t i = 0;
-         i < sizeof(kFactoryInfo) / sizeof(kFactoryInfo[0]); ++i) {
-        if (!strcmp(name, kFactoryInfo[i].name)) {
-            return (*kFactoryInfo[i].CreateFunc)(source, meta);
-        }
-    }
-
-    return NULL;
-}
-
-#undef FACTORY_CREATE_ENCODER
-#undef FACTORY_REF
-
-#define CODEC_LOGI(x, ...) ALOGI("[%s] " x, mComponentName, ##__VA_ARGS__)
-#define CODEC_LOGV(x, ...) ALOGV("[%s] " x, mComponentName, ##__VA_ARGS__)
-#define CODEC_LOGW(x, ...) ALOGW("[%s] " x, mComponentName, ##__VA_ARGS__)
-#define CODEC_LOGE(x, ...) ALOGE("[%s] " x, mComponentName, ##__VA_ARGS__)
-
-struct OMXCodecObserver : public BnOMXObserver {
-    OMXCodecObserver() {
-    }
-
-    void setCodec(const sp<OMXCodec> &target) {
-        mTarget = target;
-    }
-
-    // from IOMXObserver
-    virtual void onMessages(const std::list<omx_message> &messages) {
-        sp<OMXCodec> codec = mTarget.promote();
-
-        if (codec.get() != NULL) {
-            Mutex::Autolock autoLock(codec->mLock);
-            for (std::list<omx_message>::const_iterator it = messages.cbegin();
-                  it != messages.cend(); ++it) {
-                codec->on_message(*it);
-            }
-            codec.clear();
-        }
-    }
-
-protected:
-    virtual ~OMXCodecObserver() {}
-
-private:
-    wp<OMXCodec> mTarget;
-
-    OMXCodecObserver(const OMXCodecObserver &);
-    OMXCodecObserver &operator=(const OMXCodecObserver &);
-};
-
-template<class T>
-static void InitOMXParams(T *params) {
-    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(sizeof(OMX_PTR) == 4); // check OMX_PTR is 4 bytes.
-    params->nSize = sizeof(T);
-    params->nVersion.s.nVersionMajor = 1;
-    params->nVersion.s.nVersionMinor = 0;
-    params->nVersion.s.nRevision = 0;
-    params->nVersion.s.nStep = 0;
-}
-
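-// "OMX.google.*" components are Google's software codecs; any other "OMX.*"
-// prefix is assumed to be a vendor (typically hardware) component, and
-// non-OMX names are treated as software codecs.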
-static bool IsSoftwareCodec(const char *componentName) {
-    if (!strncmp("OMX.google.", componentName, 11)) {
-        return true;
-    }
-
-    if (!strncmp("OMX.", componentName, 4)) {
-        return false;
-    }
-
-    return true;
-}
-
-// A sort order in which OMX software codecs are first, followed
-// by other (non-OMX) software codecs, followed by everything else.
-static int CompareSoftwareCodecsFirst(
-        const OMXCodec::CodecNameAndQuirks *elem1,
-        const OMXCodec::CodecNameAndQuirks *elem2) {
-    bool isOMX1 = !strncmp(elem1->mName.string(), "OMX.", 4);
-    bool isOMX2 = !strncmp(elem2->mName.string(), "OMX.", 4);
-
-    bool isSoftwareCodec1 = IsSoftwareCodec(elem1->mName.string());
-    bool isSoftwareCodec2 = IsSoftwareCodec(elem2->mName.string());
-
-    if (isSoftwareCodec1) {
-        if (!isSoftwareCodec2) { return -1; }
-
-        if (isOMX1) {
-            if (isOMX2) { return 0; }
-
-            return -1;
-        } else {
-            if (isOMX2) { return 1; }
-
-            return 0;
-        }
-    }
-
-    if (isSoftwareCodec2) {
-        return 1;
-    }
-
-    return 0;
-}
-
-// static
-void OMXCodec::findMatchingCodecs(
-        const char *mime,
-        bool createEncoder, const char *matchComponentName,
-        uint32_t flags,
-        Vector<CodecNameAndQuirks> *matchingCodecs) {
-    matchingCodecs->clear();
-
-    const sp<IMediaCodecList> list = MediaCodecList::getInstance();
-    if (list == NULL) {
-        return;
-    }
-
-    size_t index = 0;
-    for (;;) {
-        ssize_t matchIndex =
-            list->findCodecByType(mime, createEncoder, index);
-
-        if (matchIndex < 0) {
-            break;
-        }
-
-        index = matchIndex + 1;
-
-        const sp<MediaCodecInfo> info = list->getCodecInfo(matchIndex);
-        CHECK(info != NULL);
-        const char *componentName = info->getCodecName();
-
-        // If a specific codec is requested, skip the non-matching ones.
-        if (matchComponentName && strcmp(componentName, matchComponentName)) {
-            continue;
-        }
-
-        // When requesting software-only codecs, push only software codecs;
-        // when requesting hardware-only codecs, push only hardware codecs;
-        // when neither software-only nor hardware-only codecs are
-        // requested, push all codecs.
-        if (((flags & kSoftwareCodecsOnly) &&   IsSoftwareCodec(componentName)) ||
-            ((flags & kHardwareCodecsOnly) &&  !IsSoftwareCodec(componentName)) ||
-            (!(flags & (kSoftwareCodecsOnly | kHardwareCodecsOnly)))) {
-
-            ssize_t index = matchingCodecs->add();
-            CodecNameAndQuirks *entry = &matchingCodecs->editItemAt(index);
-            entry->mName = String8(componentName);
-            entry->mQuirks = getComponentQuirks(info);
-
-            ALOGV("matching '%s' quirks 0x%08x",
-                  entry->mName.string(), entry->mQuirks);
-        }
-    }
-
-    if (flags & kPreferSoftwareCodecs) {
-        matchingCodecs->sort(CompareSoftwareCodecsFirst);
-    }
-}
-
-// static
-uint32_t OMXCodec::getComponentQuirks(
-        const sp<MediaCodecInfo> &info) {
-    uint32_t quirks = 0;
-    if (info->hasQuirk("requires-allocate-on-input-ports")) {
-        quirks |= kRequiresAllocateBufferOnInputPorts;
-    }
-    if (info->hasQuirk("requires-allocate-on-output-ports")) {
-        quirks |= kRequiresAllocateBufferOnOutputPorts;
-    }
-    if (info->hasQuirk("output-buffers-are-unreadable")) {
-        quirks |= kOutputBuffersAreUnreadable;
-    }
-
-    return quirks;
-}
-
-// static
-bool OMXCodec::findCodecQuirks(const char *componentName, uint32_t *quirks) {
-    const sp<IMediaCodecList> list = MediaCodecList::getInstance();
-    if (list == NULL) {
-        return false;
-    }
-
-    ssize_t index = list->findCodecByName(componentName);
-
-    if (index < 0) {
-        return false;
-    }
-
-    const sp<MediaCodecInfo> info = list->getCodecInfo(index);
-    CHECK(info != NULL);
-    *quirks = getComponentQuirks(info);
-
-    return true;
-}
-
-// static
-sp<MediaSource> OMXCodec::Create(
-        const sp<IOMX> &omx,
-        const sp<MetaData> &meta, bool createEncoder,
-        const sp<MediaSource> &source,
-        const char *matchComponentName,
-        uint32_t flags,
-        const sp<ANativeWindow> &nativeWindow) {
-    int32_t requiresSecureBuffers;
-    if (source->getFormat()->findInt32(
-                kKeyRequiresSecureBuffers,
-                &requiresSecureBuffers)
-            && requiresSecureBuffers) {
-        flags |= kIgnoreCodecSpecificData;
-        flags |= kUseSecureInputBuffers;
-    }
-
-    const char *mime;
-    bool success = meta->findCString(kKeyMIMEType, &mime);
-    CHECK(success);
-
-    Vector<CodecNameAndQuirks> matchingCodecs;
-    findMatchingCodecs(
-            mime, createEncoder, matchComponentName, flags, &matchingCodecs);
-
-    if (matchingCodecs.isEmpty()) {
-        ALOGV("No matching codecs! (mime: %s, createEncoder: %s, "
-                "matchComponentName: %s, flags: 0x%x)",
-                mime, createEncoder ? "true" : "false", matchComponentName, flags);
-        return NULL;
-    }
-
-    sp<OMXCodecObserver> observer = new OMXCodecObserver;
-    IOMX::node_id node = 0;
-
-    for (size_t i = 0; i < matchingCodecs.size(); ++i) {
-        const char *componentNameBase = matchingCodecs[i].mName.string();
-        uint32_t quirks = matchingCodecs[i].mQuirks;
-        const char *componentName = componentNameBase;
-
-        AString tmp;
-        if (flags & kUseSecureInputBuffers) {
-            tmp = componentNameBase;
-            tmp.append(".secure");
-
-            componentName = tmp.c_str();
-        }
-
-        if (createEncoder) {
-            sp<MediaSource> softwareCodec =
-                InstantiateSoftwareEncoder(componentName, source, meta);
-
-            if (softwareCodec != NULL) {
-                ALOGV("Successfully allocated software codec '%s'", componentName);
-
-                return softwareCodec;
-            }
-        }
-
-        ALOGV("Attempting to allocate OMX node '%s'", componentName);
-
-        status_t err = omx->allocateNode(componentName, observer, &node);
-        if (err == OK) {
-            ALOGV("Successfully allocated OMX node '%s'", componentName);
-
-            sp<OMXCodec> codec = new OMXCodec(
-                    omx, node, quirks, flags,
-                    createEncoder, mime, componentName,
-                    source, nativeWindow);
-
-            observer->setCodec(codec);
-
-            err = codec->configureCodec(meta);
-            if (err == OK) {
-                return codec;
-            }
-
-            ALOGV("Failed to configure codec '%s'", componentName);
-        }
-    }
-
-    return NULL;
-}
-
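-// Extracts the parameter-set NAL units from an HEVCDecoderConfigurationRecord
-// (see ISO/IEC 14496-15) and registers each one as codec specific data.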
-status_t OMXCodec::parseHEVCCodecSpecificData(
-        const void *data, size_t size,
-        unsigned *profile, unsigned *level) {
-    const uint8_t *ptr = (const uint8_t *)data;
-
-    // The header (configurationVersion through numOfArrays) occupies the
-    // first 23 bytes; verify that much is present and that
-    // configurationVersion == 1.
-    if (size < 23 || ptr[0] != 1) {
-        return ERROR_MALFORMED;
-    }
-
-    *profile = (ptr[1] & 31);
-    *level = ptr[12];
-
-    ptr += 22;
-    size -= 22;
-
-    size_t numOfArrays = ptr[0];
-    ptr += 1;
-    size -= 1;
-    for (size_t i = 0; i < numOfArrays; i++) {
-        // Each array starts with a NAL-unit-type byte followed by a 16-bit
-        // NAL unit count.
-        if (size < 3) {
-            return ERROR_MALFORMED;
-        }
-        ptr += 1;
-        size -= 1;
-
-        size_t numOfNals = U16_AT(ptr);
-        ptr += 2;
-        size -= 2;
-
-        for (size_t j = 0; j < numOfNals; j++) {
-            if (size < 2) {
-                return ERROR_MALFORMED;
-            }
-
-            size_t length = U16_AT(ptr);
-
-            ptr += 2;
-            size -= 2;
-
-            if (size < length) {
-                return ERROR_MALFORMED;
-            }
-            addCodecSpecificData(ptr, length);
-
-            ptr += length;
-            size -= length;
-        }
-    }
-    return OK;
-}
-
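-// Extracts the SPS/PPS NAL units from an AVCDecoderConfigurationRecord
-// (see ISO/IEC 14496-15) and registers each one as codec specific data.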
-status_t OMXCodec::parseAVCCodecSpecificData(
-        const void *data, size_t size,
-        unsigned *profile, unsigned *level) {
-    const uint8_t *ptr = (const uint8_t *)data;
-
-    // verify minimum size and configurationVersion == 1.
-    if (size < 7 || ptr[0] != 1) {
-        return ERROR_MALFORMED;
-    }
-
-    *profile = ptr[1];
-    *level = ptr[3];
-
-    // There is decodable content out there that fails the following
-    // assertion, let's be lenient for now...
-    // CHECK((ptr[4] >> 2) == 0x3f);  // reserved
-
-    size_t lengthSize __unused = 1 + (ptr[4] & 3);
-
-    // commented out check below as H264_QVGA_500_NO_AUDIO.3gp
-    // violates it...
-    // CHECK((ptr[5] >> 5) == 7);  // reserved
-
-    size_t numSeqParameterSets = ptr[5] & 31;
-
-    ptr += 6;
-    size -= 6;
-
-    for (size_t i = 0; i < numSeqParameterSets; ++i) {
-        if (size < 2) {
-            return ERROR_MALFORMED;
-        }
-
-        size_t length = U16_AT(ptr);
-
-        ptr += 2;
-        size -= 2;
-
-        if (size < length) {
-            return ERROR_MALFORMED;
-        }
-
-        addCodecSpecificData(ptr, length);
-
-        ptr += length;
-        size -= length;
-    }
-
-    if (size < 1) {
-        return ERROR_MALFORMED;
-    }
-
-    size_t numPictureParameterSets = *ptr;
-    ++ptr;
-    --size;
-
-    for (size_t i = 0; i < numPictureParameterSets; ++i) {
-        if (size < 2) {
-            return ERROR_MALFORMED;
-        }
-
-        size_t length = U16_AT(ptr);
-
-        ptr += 2;
-        size -= 2;
-
-        if (size < length) {
-            return ERROR_MALFORMED;
-        }
-
-        addCodecSpecificData(ptr, length);
-
-        ptr += length;
-        size -= length;
-    }
-
-    return OK;
-}
-
-status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
-    ALOGV("configureCodec protected=%d",
-         (mFlags & kEnableGrallocUsageProtected) ? 1 : 0);
-
-    if (!(mFlags & kIgnoreCodecSpecificData)) {
-        uint32_t type;
-        const void *data;
-        size_t size;
-        if (meta->findData(kKeyESDS, &type, &data, &size)) {
-            ESDS esds((const char *)data, size);
-            CHECK_EQ(esds.InitCheck(), (status_t)OK);
-
-            const void *codec_specific_data;
-            size_t codec_specific_data_size;
-            esds.getCodecSpecificInfo(
-                    &codec_specific_data, &codec_specific_data_size);
-
-            addCodecSpecificData(
-                    codec_specific_data, codec_specific_data_size);
-        } else if (meta->findData(kKeyAVCC, &type, &data, &size)) {
-            // Parse the AVCDecoderConfigurationRecord
-
-            unsigned profile, level;
-            status_t err;
-            if ((err = parseAVCCodecSpecificData(
-                            data, size, &profile, &level)) != OK) {
-                ALOGE("Malformed AVC codec specific data.");
-                return err;
-            }
-
-            CODEC_LOGI(
-                    "AVC profile = %u (%s), level = %u",
-                    profile, AVCProfileToString(profile), level);
-        } else if (meta->findData(kKeyHVCC, &type, &data, &size)) {
-            // Parse the HEVCDecoderConfigurationRecord
-
-            unsigned profile, level;
-            status_t err;
-            if ((err = parseHEVCCodecSpecificData(
-                            data, size, &profile, &level)) != OK) {
-                ALOGE("Malformed HEVC codec specific data.");
-                return err;
-            }
-
-            CODEC_LOGI(
-                    "HEVC profile = %u, level = %u",
-                    profile, level);
-        } else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
-            addCodecSpecificData(data, size);
-
-            CHECK(meta->findData(kKeyVorbisBooks, &type, &data, &size));
-            addCodecSpecificData(data, size);
-        } else if (meta->findData(kKeyOpusHeader, &type, &data, &size)) {
-            addCodecSpecificData(data, size);
-
-            CHECK(meta->findData(kKeyOpusCodecDelay, &type, &data, &size));
-            addCodecSpecificData(data, size);
-            CHECK(meta->findData(kKeyOpusSeekPreRoll, &type, &data, &size));
-            addCodecSpecificData(data, size);
-        }
-    }
-
-    int32_t bitRate = 0;
-    if (mIsEncoder) {
-        CHECK(meta->findInt32(kKeyBitRate, &bitRate));
-    }
-    if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, mMIME)) {
-        setAMRFormat(false /* isWAMR */, bitRate);
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, mMIME)) {
-        setAMRFormat(true /* isWAMR */, bitRate);
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mMIME)) {
-        int32_t numChannels, sampleRate, aacProfile;
-        CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
-        CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
-
-        if (!meta->findInt32(kKeyAACProfile, &aacProfile)) {
-            aacProfile = OMX_AUDIO_AACObjectNull;
-        }
-
-        int32_t isADTS;
-        if (!meta->findInt32(kKeyIsADTS, &isADTS)) {
-            isADTS = false;
-        }
-
-        status_t err = setAACFormat(numChannels, sampleRate, bitRate, aacProfile, isADTS);
-        if (err != OK) {
-            CODEC_LOGE("setAACFormat() failed (err = %d)", err);
-            return err;
-        }
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_MPEG, mMIME)) {
-        int32_t numChannels, sampleRate;
-        if (meta->findInt32(kKeyChannelCount, &numChannels)
-                && meta->findInt32(kKeySampleRate, &sampleRate)) {
-            // Since we did not always check for these, leave them optional
-            // and have the decoder figure it all out.
-            setRawAudioFormat(
-                    mIsEncoder ? kPortIndexInput : kPortIndexOutput,
-                    sampleRate,
-                    numChannels);
-        }
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mMIME)) {
-        int32_t numChannels;
-        int32_t sampleRate;
-        CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
-        CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
-
-        status_t err = setAC3Format(numChannels, sampleRate);
-        if (err != OK) {
-            CODEC_LOGE("setAC3Format() failed (err = %d)", err);
-            return err;
-        }
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_ALAW, mMIME)
-            || !strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_MLAW, mMIME)) {
-        // These are PCM-like formats with a fixed sample rate but
-        // a variable number of channels.
-
-        int32_t sampleRate;
-        int32_t numChannels;
-        CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
-        if (!meta->findInt32(kKeySampleRate, &sampleRate)) {
-            sampleRate = 8000;
-        }
-
-        setG711Format(sampleRate, numChannels);
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mMIME)) {
-        CHECK(!mIsEncoder);
-
-        int32_t numChannels, sampleRate;
-        CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
-        CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
-
-        setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
-    }
-
-    if (!strncasecmp(mMIME, "video/", 6)) {
-
-        if (mIsEncoder) {
-            setVideoInputFormat(mMIME, meta);
-        } else {
-            status_t err = setVideoOutputFormat(
-                    mMIME, meta);
-
-            if (err != OK) {
-                return err;
-            }
-        }
-    }
-
-    int32_t maxInputSize;
-    if (meta->findInt32(kKeyMaxInputSize, &maxInputSize)) {
-        setMinBufferSize(kPortIndexInput, (OMX_U32)maxInputSize);
-    }
-
-    initOutputFormat(meta);
-
-    if (mNativeWindow != NULL
-        && !mIsEncoder
-        && !strncasecmp(mMIME, "video/", 6)
-        && !strncmp(mComponentName, "OMX.", 4)) {
-        status_t err = initNativeWindow();
-        if (err != OK) {
-            return err;
-        }
-    }
-
-    return OK;
-}
-
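-// Raise the port's buffer size to at least "size" (components with bogus
-// reported input buffer sizes get exactly "size"), then read the definition
-// back to verify that the component actually accepted the change.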
-void OMXCodec::setMinBufferSize(OMX_U32 portIndex, OMX_U32 size) {
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = portIndex;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    if ((portIndex == kPortIndexInput && (mQuirks & kInputBufferSizesAreBogus))
-        || (def.nBufferSize < size)) {
-        def.nBufferSize = size;
-    }
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    // Make sure the setting actually stuck.
-    if (portIndex == kPortIndexInput
-            && (mQuirks & kInputBufferSizesAreBogus)) {
-        CHECK_EQ(def.nBufferSize, size);
-    } else {
-        CHECK(def.nBufferSize >= size);
-    }
-}
-
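-// Walk the port's format enumeration until an entry matching both the
-// requested compression format and color format is found, then select it.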
-status_t OMXCodec::setVideoPortFormatType(
-        OMX_U32 portIndex,
-        OMX_VIDEO_CODINGTYPE compressionFormat,
-        OMX_COLOR_FORMATTYPE colorFormat) {
-    OMX_VIDEO_PARAM_PORTFORMATTYPE format;
-    InitOMXParams(&format);
-    format.nPortIndex = portIndex;
-    format.nIndex = 0;
-    bool found = false;
-
-    OMX_U32 index = 0;
-    for (;;) {
-        format.nIndex = index;
-        status_t err = mOMX->getParameter(
-                mNode, OMX_IndexParamVideoPortFormat,
-                &format, sizeof(format));
-
-        if (err != OK) {
-            return err;
-        }
-
-        // The following assertion is violated by TI's video decoder.
-        // CHECK_EQ(format.nIndex, index);
-
-#if 1
-        CODEC_LOGV("portIndex: %u, index: %u, eCompressionFormat=%d eColorFormat=%d",
-             portIndex,
-             index, format.eCompressionFormat, format.eColorFormat);
-#endif
-
-        if (format.eCompressionFormat == compressionFormat
-                && format.eColorFormat == colorFormat) {
-            found = true;
-            break;
-        }
-
-        ++index;
-        if (index >= kMaxColorFormatSupported) {
-            CODEC_LOGE("color format %d or compression format %d is not supported",
-                colorFormat, compressionFormat);
-            return UNKNOWN_ERROR;
-        }
-    }
-
-    if (!found) {
-        return UNKNOWN_ERROR;
-    }
-
-    CODEC_LOGV("found a match.");
-    status_t err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoPortFormat,
-            &format, sizeof(format));
-
-    return err;
-}
-
-static size_t getFrameSize(
-        OMX_COLOR_FORMATTYPE colorFormat, int32_t width, int32_t height) {
-    switch (colorFormat) {
-        case OMX_COLOR_FormatYCbYCr:
-        case OMX_COLOR_FormatCbYCrY:
-            return width * height * 2;
-
-        case OMX_COLOR_FormatYUV420Planar:
-        case OMX_COLOR_FormatYUV420SemiPlanar:
-        case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
-        /*
-        * FIXME: For the opaque color format, the frame size does not need
-        * to be (w*h*3)/2; it only needs to exceed a certain minimum buffer
-        * size. However, the opaque format has so far been tested only with
-        * YUV420 backing formats. If that changes, this part needs to be
-        * revisited.
-        */
-        case OMX_COLOR_FormatAndroidOpaque:
-            return (width * height * 3) / 2;
-
-        default:
-            CHECK(!"Should not be here. Unsupported color format.");
-            break;
-    }
-    return 0;
-}
-
-status_t OMXCodec::findTargetColorFormat(
-        const sp<MetaData>& meta, OMX_COLOR_FORMATTYPE *colorFormat) {
-    ALOGV("findTargetColorFormat");
-    CHECK(mIsEncoder);
-
-    *colorFormat = OMX_COLOR_FormatYUV420SemiPlanar;
-    int32_t targetColorFormat;
-    if (meta->findInt32(kKeyColorFormat, &targetColorFormat)) {
-        *colorFormat = (OMX_COLOR_FORMATTYPE) targetColorFormat;
-    }
-
-    // Check whether the target color format is supported.
-    return isColorFormatSupported(*colorFormat, kPortIndexInput);
-}
-
-status_t OMXCodec::isColorFormatSupported(
-        OMX_COLOR_FORMATTYPE colorFormat, int portIndex) {
-    ALOGV("isColorFormatSupported: %d", static_cast<int>(colorFormat));
-
-    // Enumerate all the color formats supported by
-    // the omx component to see whether the given
-    // color format is supported.
-    OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
-    InitOMXParams(&portFormat);
-    portFormat.nPortIndex = portIndex;
-    OMX_U32 index = 0;
-    portFormat.nIndex = index;
-    while (true) {
-        if (OMX_ErrorNone != mOMX->getParameter(
-                mNode, OMX_IndexParamVideoPortFormat,
-                &portFormat, sizeof(portFormat))) {
-            break;
-        }
-        // Make sure that omx component does not overwrite
-        // the incremented index (bug 2897413).
-        CHECK_EQ(index, portFormat.nIndex);
-        if (portFormat.eColorFormat == colorFormat) {
-            CODEC_LOGV("Found supported color format: %d", portFormat.eColorFormat);
-            return OK;  // colorFormat is supported!
-        }
-        ++index;
-        portFormat.nIndex = index;
-
-        if (index >= kMaxColorFormatSupported) {
-            CODEC_LOGE("More than %u color formats are supported???", index);
-            break;
-        }
-    }
-
-    CODEC_LOGE("color format %d is not supported", colorFormat);
-    return UNKNOWN_ERROR;
-}
-
-void OMXCodec::setVideoInputFormat(
-        const char *mime, const sp<MetaData>& meta) {
-
-    int32_t width, height, frameRate, bitRate, stride, sliceHeight;
-    bool success = meta->findInt32(kKeyWidth, &width);
-    success = success && meta->findInt32(kKeyHeight, &height);
-    success = success && meta->findInt32(kKeyFrameRate, &frameRate);
-    success = success && meta->findInt32(kKeyBitRate, &bitRate);
-    success = success && meta->findInt32(kKeyStride, &stride);
-    success = success && meta->findInt32(kKeySliceHeight, &sliceHeight);
-    CHECK(success);
-    CHECK(stride != 0);
-
-    OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused;
-    if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
-        compressionFormat = OMX_VIDEO_CodingAVC;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
-        compressionFormat = OMX_VIDEO_CodingHEVC;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
-        compressionFormat = OMX_VIDEO_CodingMPEG4;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
-        compressionFormat = OMX_VIDEO_CodingH263;
-    } else {
-        ALOGE("Not a supported video mime type: %s", mime);
-        CHECK(!"Should not be here. Not a supported video mime type.");
-    }
-
-    OMX_COLOR_FORMATTYPE colorFormat;
-    CHECK_EQ((status_t)OK, findTargetColorFormat(meta, &colorFormat));
-
-    status_t err;
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
-
-    //////////////////////// Input port /////////////////////////
-    CHECK_EQ(setVideoPortFormatType(
-            kPortIndexInput, OMX_VIDEO_CodingUnused,
-            colorFormat), (status_t)OK);
-
-    InitOMXParams(&def);
-    def.nPortIndex = kPortIndexInput;
-
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    def.nBufferSize = getFrameSize(colorFormat,
-            stride > 0 ? stride : -stride, sliceHeight);
-
-    CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainVideo);
-
-    video_def->nFrameWidth = width;
-    video_def->nFrameHeight = height;
-    video_def->nStride = stride;
-    video_def->nSliceHeight = sliceHeight;
-    video_def->xFramerate = (frameRate << 16);  // Q16 format
-    video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
-    video_def->eColorFormat = colorFormat;
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    //////////////////////// Output port /////////////////////////
-    CHECK_EQ(setVideoPortFormatType(
-            kPortIndexOutput, compressionFormat, OMX_COLOR_FormatUnused),
-            (status_t)OK);
-    InitOMXParams(&def);
-    def.nPortIndex = kPortIndexOutput;
-
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-
-    CHECK_EQ(err, (status_t)OK);
-    CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainVideo);
-
-    video_def->nFrameWidth = width;
-    video_def->nFrameHeight = height;
-    video_def->xFramerate = 0;      // No need for output port
-    video_def->nBitrate = bitRate;  // target bitrate, in bits per second
-    video_def->eCompressionFormat = compressionFormat;
-    video_def->eColorFormat = OMX_COLOR_FormatUnused;
-    if (mQuirks & kRequiresLargerEncoderOutputBuffer) {
-        // Increase the output buffer size by 50%
-        def.nBufferSize = ((def.nBufferSize * 3) >> 1);
-    }
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    /////////////////// Codec-specific ////////////////////////
-    switch (compressionFormat) {
-        case OMX_VIDEO_CodingMPEG4:
-        {
-            CHECK_EQ(setupMPEG4EncoderParameters(meta), (status_t)OK);
-            break;
-        }
-
-        case OMX_VIDEO_CodingH263:
-            CHECK_EQ(setupH263EncoderParameters(meta), (status_t)OK);
-            break;
-
-        case OMX_VIDEO_CodingAVC:
-        {
-            CHECK_EQ(setupAVCEncoderParameters(meta), (status_t)OK);
-            break;
-        }
-
-        default:
-            CHECK(!"Support for this compressionFormat to be implemented.");
-            break;
-    }
-}
-
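-// Translate the I-frame interval (in seconds) into the number of P-frames
-// between consecutive I-frames: negative means a single I-frame
-// (effectively infinite spacing), zero means I-frames only.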
-static OMX_U32 setPFramesSpacing(int32_t iFramesInterval, int32_t frameRate) {
-    if (iFramesInterval < 0) {
-        return 0xFFFFFFFF;
-    } else if (iFramesInterval == 0) {
-        return 0;
-    }
-    OMX_U32 ret = frameRate * iFramesInterval - 1;
-    return ret;
-}
-
-status_t OMXCodec::setupErrorCorrectionParameters() {
-    OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE errorCorrectionType;
-    InitOMXParams(&errorCorrectionType);
-    errorCorrectionType.nPortIndex = kPortIndexOutput;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoErrorCorrection,
-            &errorCorrectionType, sizeof(errorCorrectionType));
-    if (err != OK) {
-        ALOGW("Error correction param query is not supported");
-        return OK;  // Optional feature. Ignore this failure
-    }
-
-    errorCorrectionType.bEnableHEC = OMX_FALSE;
-    errorCorrectionType.bEnableResync = OMX_TRUE;
-    errorCorrectionType.nResynchMarkerSpacing = 256;
-    errorCorrectionType.bEnableDataPartitioning = OMX_FALSE;
-    errorCorrectionType.bEnableRVLC = OMX_FALSE;
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoErrorCorrection,
-            &errorCorrectionType, sizeof(errorCorrectionType));
-    if (err != OK) {
-        ALOGW("Error correction param configuration is not supported");
-    }
-
-    // Optional feature. Ignore the failure.
-    return OK;
-}
-
-status_t OMXCodec::setupBitRate(int32_t bitRate) {
-    OMX_VIDEO_PARAM_BITRATETYPE bitrateType;
-    InitOMXParams(&bitrateType);
-    bitrateType.nPortIndex = kPortIndexOutput;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoBitrate,
-            &bitrateType, sizeof(bitrateType));
-    CHECK_EQ(err, (status_t)OK);
-
-    bitrateType.eControlRate = OMX_Video_ControlRateVariable;
-    bitrateType.nTargetBitrate = bitRate;
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoBitrate,
-            &bitrateType, sizeof(bitrateType));
-    CHECK_EQ(err, (status_t)OK);
-    return OK;
-}
-
-status_t OMXCodec::getVideoProfileLevel(
-        const sp<MetaData>& meta,
-        const CodecProfileLevel& defaultProfileLevel,
-        CodecProfileLevel &profileLevel) {
-    CODEC_LOGV("Default profile: %u, level #x%x",
-            defaultProfileLevel.mProfile, defaultProfileLevel.mLevel);
-
-    // Are the default profile and level overwriten?
-    int32_t profile, level;
-    if (!meta->findInt32(kKeyVideoProfile, &profile)) {
-        profile = defaultProfileLevel.mProfile;
-    }
-    if (!meta->findInt32(kKeyVideoLevel, &level)) {
-        level = defaultProfileLevel.mLevel;
-    }
-    CODEC_LOGV("Target profile: %d, level: %d", profile, level);
-
-    // Are the target profile and level supported by the encoder?
-    OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
-    InitOMXParams(&param);
-    param.nPortIndex = kPortIndexOutput;
-    for (param.nProfileIndex = 0;; ++param.nProfileIndex) {
-        status_t err = mOMX->getParameter(
-                mNode, OMX_IndexParamVideoProfileLevelQuerySupported,
-                &param, sizeof(param));
-
-        if (err != OK) break;
-
-        int32_t supportedProfile = static_cast<int32_t>(param.eProfile);
-        int32_t supportedLevel = static_cast<int32_t>(param.eLevel);
-        CODEC_LOGV("Supported profile: %d, level %d",
-            supportedProfile, supportedLevel);
-
-        if (profile == supportedProfile &&
-            level <= supportedLevel) {
-            // We can further check whether the level is a valid
-            // value; but we will leave that to the omx encoder component
-            // via OMX_SetParameter call.
-            profileLevel.mProfile = profile;
-            profileLevel.mLevel = level;
-            return OK;
-        }
-    }
-
-    CODEC_LOGE("Target profile (%d) and level (%d) is not supported",
-            profile, level);
-    return BAD_VALUE;
-}
-
-status_t OMXCodec::setupH263EncoderParameters(const sp<MetaData>& meta) {
-    int32_t iFramesInterval, frameRate, bitRate;
-    bool success = meta->findInt32(kKeyBitRate, &bitRate);
-    success = success && meta->findInt32(kKeyFrameRate, &frameRate);
-    success = success && meta->findInt32(kKeyIFramesInterval, &iFramesInterval);
-    CHECK(success);
-    OMX_VIDEO_PARAM_H263TYPE h263type;
-    InitOMXParams(&h263type);
-    h263type.nPortIndex = kPortIndexOutput;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
-    CHECK_EQ(err, (status_t)OK);
-
-    h263type.nAllowedPictureTypes =
-        OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
-
-    h263type.nPFrames = setPFramesSpacing(iFramesInterval, frameRate);
-    if (h263type.nPFrames == 0) {
-        h263type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
-    }
-    h263type.nBFrames = 0;
-
-    // Check profile and level parameters
-    CodecProfileLevel defaultProfileLevel, profileLevel;
-    defaultProfileLevel.mProfile = h263type.eProfile;
-    defaultProfileLevel.mLevel = h263type.eLevel;
-    err = getVideoProfileLevel(meta, defaultProfileLevel, profileLevel);
-    if (err != OK) return err;
-    h263type.eProfile = static_cast<OMX_VIDEO_H263PROFILETYPE>(profileLevel.mProfile);
-    h263type.eLevel = static_cast<OMX_VIDEO_H263LEVELTYPE>(profileLevel.mLevel);
-
-    h263type.bPLUSPTYPEAllowed = OMX_FALSE;
-    h263type.bForceRoundingTypeToZero = OMX_FALSE;
-    h263type.nPictureHeaderRepetition = 0;
-    h263type.nGOBHeaderInterval = 0;
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
-    CHECK_EQ(err, (status_t)OK);
-
-    CHECK_EQ(setupBitRate(bitRate), (status_t)OK);
-    CHECK_EQ(setupErrorCorrectionParameters(), (status_t)OK);
-
-    return OK;
-}
-
-status_t OMXCodec::setupMPEG4EncoderParameters(const sp<MetaData>& meta) {
-    int32_t iFramesInterval, frameRate, bitRate;
-    bool success = meta->findInt32(kKeyBitRate, &bitRate);
-    success = success && meta->findInt32(kKeyFrameRate, &frameRate);
-    success = success && meta->findInt32(kKeyIFramesInterval, &iFramesInterval);
-    CHECK(success);
-    OMX_VIDEO_PARAM_MPEG4TYPE mpeg4type;
-    InitOMXParams(&mpeg4type);
-    mpeg4type.nPortIndex = kPortIndexOutput;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
-    CHECK_EQ(err, (status_t)OK);
-
-    mpeg4type.nSliceHeaderSpacing = 0;
-    mpeg4type.bSVH = OMX_FALSE;
-    mpeg4type.bGov = OMX_FALSE;
-
-    mpeg4type.nAllowedPictureTypes =
-        OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
-
-    mpeg4type.nPFrames = setPFramesSpacing(iFramesInterval, frameRate);
-    if (mpeg4type.nPFrames == 0) {
-        mpeg4type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
-    }
-    mpeg4type.nBFrames = 0;
-    mpeg4type.nIDCVLCThreshold = 0;
-    mpeg4type.bACPred = OMX_TRUE;
-    mpeg4type.nMaxPacketSize = 256;
-    mpeg4type.nTimeIncRes = 1000;
-    mpeg4type.nHeaderExtension = 0;
-    mpeg4type.bReversibleVLC = OMX_FALSE;
-
-    // Check profile and level parameters
-    CodecProfileLevel defaultProfileLevel, profileLevel;
-    defaultProfileLevel.mProfile = mpeg4type.eProfile;
-    defaultProfileLevel.mLevel = mpeg4type.eLevel;
-    err = getVideoProfileLevel(meta, defaultProfileLevel, profileLevel);
-    if (err != OK) return err;
-    mpeg4type.eProfile = static_cast<OMX_VIDEO_MPEG4PROFILETYPE>(profileLevel.mProfile);
-    mpeg4type.eLevel = static_cast<OMX_VIDEO_MPEG4LEVELTYPE>(profileLevel.mLevel);
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
-    CHECK_EQ(err, (status_t)OK);
-
-    CHECK_EQ(setupBitRate(bitRate), (status_t)OK);
-    CHECK_EQ(setupErrorCorrectionParameters(), (status_t)OK);
-
-    return OK;
-}
-
-status_t OMXCodec::setupAVCEncoderParameters(const sp<MetaData>& meta) {
-    int32_t iFramesInterval, frameRate, bitRate;
-    bool success = meta->findInt32(kKeyBitRate, &bitRate);
-    success = success && meta->findInt32(kKeyFrameRate, &frameRate);
-    success = success && meta->findInt32(kKeyIFramesInterval, &iFramesInterval);
-    CHECK(success);
-
-    OMX_VIDEO_PARAM_AVCTYPE h264type;
-    InitOMXParams(&h264type);
-    h264type.nPortIndex = kPortIndexOutput;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
-    CHECK_EQ(err, (status_t)OK);
-
-    h264type.nAllowedPictureTypes =
-        OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
-
-    // Check profile and level parameters
-    CodecProfileLevel defaultProfileLevel, profileLevel;
-    defaultProfileLevel.mProfile = h264type.eProfile;
-    defaultProfileLevel.mLevel = h264type.eLevel;
-    err = getVideoProfileLevel(meta, defaultProfileLevel, profileLevel);
-    if (err != OK) return err;
-    h264type.eProfile = static_cast<OMX_VIDEO_AVCPROFILETYPE>(profileLevel.mProfile);
-    h264type.eLevel = static_cast<OMX_VIDEO_AVCLEVELTYPE>(profileLevel.mLevel);
-
-    // XXX
-    if (h264type.eProfile != OMX_VIDEO_AVCProfileBaseline) {
-        ALOGW("Use baseline profile instead of %d for AVC recording",
-            h264type.eProfile);
-        h264type.eProfile = OMX_VIDEO_AVCProfileBaseline;
-    }
-
-    if (h264type.eProfile == OMX_VIDEO_AVCProfileBaseline) {
-        h264type.nSliceHeaderSpacing = 0;
-        h264type.bUseHadamard = OMX_TRUE;
-        h264type.nRefFrames = 1;
-        h264type.nBFrames = 0;
-        h264type.nPFrames = setPFramesSpacing(iFramesInterval, frameRate);
-        if (h264type.nPFrames == 0) {
-            h264type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
-        }
-        h264type.nRefIdx10ActiveMinus1 = 0;
-        h264type.nRefIdx11ActiveMinus1 = 0;
-        h264type.bEntropyCodingCABAC = OMX_FALSE;
-        h264type.bWeightedPPrediction = OMX_FALSE;
-        h264type.bconstIpred = OMX_FALSE;
-        h264type.bDirect8x8Inference = OMX_FALSE;
-        h264type.bDirectSpatialTemporal = OMX_FALSE;
-        h264type.nCabacInitIdc = 0;
-    }
-
-    if (h264type.nBFrames != 0) {
-        h264type.nAllowedPictureTypes |= OMX_VIDEO_PictureTypeB;
-    }
-
-    h264type.bEnableUEP = OMX_FALSE;
-    h264type.bEnableFMO = OMX_FALSE;
-    h264type.bEnableASO = OMX_FALSE;
-    h264type.bEnableRS = OMX_FALSE;
-    h264type.bFrameMBsOnly = OMX_TRUE;
-    h264type.bMBAFF = OMX_FALSE;
-    h264type.eLoopFilterMode = OMX_VIDEO_AVCLoopFilterEnable;
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
-    CHECK_EQ(err, (status_t)OK);
-
-    CHECK_EQ(setupBitRate(bitRate), (status_t)OK);
-
-    return OK;
-}
-
-status_t OMXCodec::setVideoOutputFormat(
-        const char *mime, const sp<MetaData>& meta) {
-
-    int32_t width, height;
-    bool success = meta->findInt32(kKeyWidth, &width);
-    success = success && meta->findInt32(kKeyHeight, &height);
-    CHECK(success);
-
-    CODEC_LOGV("setVideoOutputFormat width=%d, height=%d", width, height);
-
-    OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused;
-    if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
-        compressionFormat = OMX_VIDEO_CodingAVC;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
-        compressionFormat = OMX_VIDEO_CodingMPEG4;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
-        compressionFormat = OMX_VIDEO_CodingHEVC;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
-        compressionFormat = OMX_VIDEO_CodingH263;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VP8, mime)) {
-        compressionFormat = OMX_VIDEO_CodingVP8;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VP9, mime)) {
-        compressionFormat = OMX_VIDEO_CodingVP9;
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG2, mime)) {
-        compressionFormat = OMX_VIDEO_CodingMPEG2;
-    } else {
-        ALOGE("Not a supported video mime type: %s", mime);
-        CHECK(!"Should not be here. Not a supported video mime type.");
-    }
-
-    status_t err = setVideoPortFormatType(
-            kPortIndexInput, compressionFormat, OMX_COLOR_FormatUnused);
-
-    if (err != OK) {
-        return err;
-    }
-
-#if 1
-    {
-        OMX_VIDEO_PARAM_PORTFORMATTYPE format;
-        InitOMXParams(&format);
-        format.nPortIndex = kPortIndexOutput;
-        format.nIndex = 0;
-
-        status_t err = mOMX->getParameter(
-                mNode, OMX_IndexParamVideoPortFormat,
-                &format, sizeof(format));
-        CHECK_EQ(err, (status_t)OK);
-        CHECK_EQ((int)format.eCompressionFormat, (int)OMX_VIDEO_CodingUnused);
-
-        int32_t colorFormat;
-        if (meta->findInt32(kKeyColorFormat, &colorFormat)
-                && colorFormat != OMX_COLOR_FormatUnused
-                && colorFormat != format.eColorFormat) {
-
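-            // The requested color format differs from the component's
-            // default; keep enumerating until it is found or the component
-            // reports OMX_ErrorNoMore.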
-            while (OMX_ErrorNoMore != err) {
-                format.nIndex++;
-                err = mOMX->getParameter(
-                        mNode, OMX_IndexParamVideoPortFormat,
-                            &format, sizeof(format));
-                if (format.eColorFormat == colorFormat) {
-                    break;
-                }
-            }
-            if (format.eColorFormat != colorFormat) {
-                CODEC_LOGE("Color format %d is not supported", colorFormat);
-                return ERROR_UNSUPPORTED;
-            }
-        }
-
-        err = mOMX->setParameter(
-                mNode, OMX_IndexParamVideoPortFormat,
-                &format, sizeof(format));
-
-        if (err != OK) {
-            return err;
-        }
-    }
-#endif
-
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = kPortIndexInput;
-
-    OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
-
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-
-    CHECK_EQ(err, (status_t)OK);
-
-#if 1
-    // XXX Need a (much) better heuristic to compute input buffer sizes.
-    const size_t X = 64 * 1024;
-    if (def.nBufferSize < X) {
-        def.nBufferSize = X;
-    }
-#endif
-
-    CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainVideo);
-
-    video_def->nFrameWidth = width;
-    video_def->nFrameHeight = height;
-
-    video_def->eCompressionFormat = compressionFormat;
-    video_def->eColorFormat = OMX_COLOR_FormatUnused;
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-
-    if (err != OK) {
-        return err;
-    }
-
-    ////////////////////////////////////////////////////////////////////////////
-
-    InitOMXParams(&def);
-    def.nPortIndex = kPortIndexOutput;
-
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-    CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainVideo);
-
-#if 0
-    def.nBufferSize =
-        (((width + 15) & -16) * ((height + 15) & -16) * 3) / 2;  // YUV420
-#endif
-
-    video_def->nFrameWidth = width;
-    video_def->nFrameHeight = height;
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-
-    return err;
-}
-
-OMXCodec::OMXCodec(
-        const sp<IOMX> &omx, IOMX::node_id node,
-        uint32_t quirks, uint32_t flags,
-        bool isEncoder,
-        const char *mime,
-        const char *componentName,
-        const sp<MediaSource> &source,
-        const sp<ANativeWindow> &nativeWindow)
-    : mOMX(omx),
-      mOMXLivesLocally(omx->livesLocally(node, getpid())),
-      mNode(node),
-      mQuirks(quirks),
-      mFlags(flags),
-      mIsEncoder(isEncoder),
-      mIsVideo(!strncasecmp("video/", mime, 6)),
-      mMIME(strdup(mime)),
-      mComponentName(strdup(componentName)),
-      mSource(source),
-      mCodecSpecificDataIndex(0),
-      mState(LOADED),
-      mInitialBufferSubmit(true),
-      mSignalledEOS(false),
-      mNoMoreOutputData(false),
-      mOutputPortSettingsHaveChanged(false),
-      mSeekTimeUs(-1),
-      mSeekMode(ReadOptions::SEEK_CLOSEST_SYNC),
-      mTargetTimeUs(-1),
-      mOutputPortSettingsChangedPending(false),
-      mSkipCutBuffer(NULL),
-      mLeftOverBuffer(NULL),
-      mPaused(false),
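-      // Software (OMX.google.*) components render through the client, so
-      // they never get the native window.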
-      mNativeWindow(
-              (!strncmp(componentName, "OMX.google.", 11))
-                        ? NULL : nativeWindow) {
-    mPortStatus[kPortIndexInput] = ENABLED;
-    mPortStatus[kPortIndexOutput] = ENABLED;
-
-    setComponentRole();
-}
-
-// static
-void OMXCodec::setComponentRole(
-        const sp<IOMX> &omx, IOMX::node_id node, bool isEncoder,
-        const char *mime) {
-    struct MimeToRole {
-        const char *mime;
-        const char *decoderRole;
-        const char *encoderRole;
-    };
-
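-    // Map MIME types to the standard component role names from the OpenMAX
-    // IL spec; MIME types missing from this table simply leave the role
-    // unset.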
-    static const MimeToRole kMimeToRole[] = {
-        { MEDIA_MIMETYPE_AUDIO_MPEG,
-            "audio_decoder.mp3", "audio_encoder.mp3" },
-        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I,
-            "audio_decoder.mp1", "audio_encoder.mp1" },
-        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
-            "audio_decoder.mp2", "audio_encoder.mp2" },
-        { MEDIA_MIMETYPE_AUDIO_AMR_NB,
-            "audio_decoder.amrnb", "audio_encoder.amrnb" },
-        { MEDIA_MIMETYPE_AUDIO_AMR_WB,
-            "audio_decoder.amrwb", "audio_encoder.amrwb" },
-        { MEDIA_MIMETYPE_AUDIO_AAC,
-            "audio_decoder.aac", "audio_encoder.aac" },
-        { MEDIA_MIMETYPE_AUDIO_VORBIS,
-            "audio_decoder.vorbis", "audio_encoder.vorbis" },
-        { MEDIA_MIMETYPE_AUDIO_OPUS,
-            "audio_decoder.opus", "audio_encoder.opus" },
-        { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
-            "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
-        { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
-            "audio_decoder.g711alaw", "audio_encoder.g711alaw" },
-        { MEDIA_MIMETYPE_VIDEO_AVC,
-            "video_decoder.avc", "video_encoder.avc" },
-        { MEDIA_MIMETYPE_VIDEO_HEVC,
-            "video_decoder.hevc", "video_encoder.hevc" },
-        { MEDIA_MIMETYPE_VIDEO_MPEG4,
-            "video_decoder.mpeg4", "video_encoder.mpeg4" },
-        { MEDIA_MIMETYPE_VIDEO_H263,
-            "video_decoder.h263", "video_encoder.h263" },
-        { MEDIA_MIMETYPE_VIDEO_VP8,
-            "video_decoder.vp8", "video_encoder.vp8" },
-        { MEDIA_MIMETYPE_VIDEO_VP9,
-            "video_decoder.vp9", "video_encoder.vp9" },
-        { MEDIA_MIMETYPE_AUDIO_RAW,
-            "audio_decoder.raw", "audio_encoder.raw" },
-        { MEDIA_MIMETYPE_AUDIO_FLAC,
-            "audio_decoder.flac", "audio_encoder.flac" },
-        { MEDIA_MIMETYPE_AUDIO_MSGSM,
-            "audio_decoder.gsm", "audio_encoder.gsm" },
-        { MEDIA_MIMETYPE_VIDEO_MPEG2,
-            "video_decoder.mpeg2", "video_encoder.mpeg2" },
-        { MEDIA_MIMETYPE_AUDIO_AC3,
-            "audio_decoder.ac3", "audio_encoder.ac3" },
-    };
-
-    static const size_t kNumMimeToRole =
-        sizeof(kMimeToRole) / sizeof(kMimeToRole[0]);
-
-    size_t i;
-    for (i = 0; i < kNumMimeToRole; ++i) {
-        if (!strcasecmp(mime, kMimeToRole[i].mime)) {
-            break;
-        }
-    }
-
-    if (i == kNumMimeToRole) {
-        return;
-    }
-
-    const char *role =
-        isEncoder ? kMimeToRole[i].encoderRole
-                  : kMimeToRole[i].decoderRole;
-
-    if (role != NULL) {
-        OMX_PARAM_COMPONENTROLETYPE roleParams;
-        InitOMXParams(&roleParams);
-
-        strncpy((char *)roleParams.cRole,
-                role, OMX_MAX_STRINGNAME_SIZE - 1);
-
-        roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
-
-        status_t err = omx->setParameter(
-                node, OMX_IndexParamStandardComponentRole,
-                &roleParams, sizeof(roleParams));
-
-        if (err != OK) {
-            ALOGW("Failed to set standard component role '%s'.", role);
-        }
-    }
-}
-
-void OMXCodec::setComponentRole() {
-    setComponentRole(mOMX, mNode, mIsEncoder, mMIME);
-}
-
-OMXCodec::~OMXCodec() {
-    mSource.clear();
-
-    CHECK(mState == LOADED || mState == ERROR || mState == LOADED_TO_IDLE);
-
-    status_t err = mOMX->freeNode(mNode);
-    CHECK_EQ(err, (status_t)OK);
-
-    mNode = 0;
-    setState(DEAD);
-
-    clearCodecSpecificData();
-
-    free(mComponentName);
-    mComponentName = NULL;
-
-    free(mMIME);
-    mMIME = NULL;
-}
-
-status_t OMXCodec::init() {
-    // mLock is held.
-
-    CHECK_EQ((int)mState, (int)LOADED);
-
-    status_t err;
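-    // Some components must receive the Loaded->Idle command only after
-    // their buffers have been allocated (see the quirk below); all others
-    // get it before allocation.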
-    if (!(mQuirks & kRequiresLoadedToIdleAfterAllocation)) {
-        err = mOMX->sendCommand(mNode, OMX_CommandStateSet, OMX_StateIdle);
-        CHECK_EQ(err, (status_t)OK);
-        setState(LOADED_TO_IDLE);
-    }
-
-    err = allocateBuffers();
-    if (err != (status_t)OK) {
-        return err;
-    }
-
-    if (mQuirks & kRequiresLoadedToIdleAfterAllocation) {
-        err = mOMX->sendCommand(mNode, OMX_CommandStateSet, OMX_StateIdle);
-        CHECK_EQ(err, (status_t)OK);
-
-        setState(LOADED_TO_IDLE);
-    }
-
-    while (mState != EXECUTING && mState != ERROR) {
-        mAsyncCompletion.wait(mLock);
-    }
-
-    return mState == ERROR ? UNKNOWN_ERROR : OK;
-}
-
-// static
-bool OMXCodec::isIntermediateState(State state) {
-    return state == LOADED_TO_IDLE
-        || state == IDLE_TO_EXECUTING
-        || state == EXECUTING_TO_IDLE
-        || state == IDLE_TO_LOADED
-        || state == RECONFIGURING;
-}
-
-status_t OMXCodec::allocateBuffers() {
-    status_t err = allocateBuffersOnPort(kPortIndexInput);
-
-    if (err != OK) {
-        return err;
-    }
-
-    return allocateBuffersOnPort(kPortIndexOutput);
-}
-
-status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
-    if (mNativeWindow != NULL && portIndex == kPortIndexOutput) {
-        return allocateOutputBuffersFromNativeWindow();
-    }
-
-    if ((mFlags & kEnableGrallocUsageProtected) && portIndex == kPortIndexOutput) {
-        ALOGE("protected output buffers must be stent to an ANativeWindow");
-        return PERMISSION_DENIED;
-    }
-
-    status_t err = OK;
-    if ((mFlags & kStoreMetaDataInVideoBuffers)
-            && portIndex == kPortIndexInput) {
-        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexInput, OMX_TRUE);
-        if (err != OK) {
-            ALOGE("Storing meta data in video buffers is not supported");
-            return err;
-        }
-    }
-
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = portIndex;
-
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-
-    if (err != OK) {
-        return err;
-    }
-
-    CODEC_LOGV("allocating %u buffers of size %u on %s port",
-            def.nBufferCountActual, def.nBufferSize,
-            portIndex == kPortIndexInput ? "input" : "output");
-
-    if (def.nBufferSize != 0 && def.nBufferCountActual > SIZE_MAX / def.nBufferSize) {
-        return BAD_VALUE;
-    }
-    size_t totalSize = def.nBufferCountActual * def.nBufferSize;
-    mDealer[portIndex] = new MemoryDealer(totalSize, "OMXCodec");
-
-    for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
-        sp<IMemory> mem = mDealer[portIndex]->allocate(def.nBufferSize);
-        if (mem == NULL || mem->pointer() == NULL) {
-            return NO_MEMORY;
-        }
-
-        BufferInfo info;
-        info.mData = NULL;
-        info.mSize = def.nBufferSize;
-
-        IOMX::buffer_id buffer;
-        if (portIndex == kPortIndexInput
-                && ((mQuirks & kRequiresAllocateBufferOnInputPorts)
-                    || (mFlags & kUseSecureInputBuffers))) {
-            if (mOMXLivesLocally) {
-                mem.clear();
-
-                err = mOMX->allocateBuffer(
-                        mNode, portIndex, def.nBufferSize, &buffer,
-                        &info.mData);
-            } else {
-                err = mOMX->allocateBufferWithBackup(
-                        mNode, portIndex, mem, &buffer, mem->size());
-            }
-        } else if (portIndex == kPortIndexOutput
-                && (mQuirks & kRequiresAllocateBufferOnOutputPorts)) {
-            if (mOMXLivesLocally) {
-                mem.clear();
-
-                err = mOMX->allocateBuffer(
-                        mNode, portIndex, def.nBufferSize, &buffer,
-                        &info.mData);
-            } else {
-                err = mOMX->allocateBufferWithBackup(
-                        mNode, portIndex, mem, &buffer, mem->size());
-            }
-        } else {
-            err = mOMX->useBuffer(mNode, portIndex, mem, &buffer, mem->size());
-        }
-
-        if (err != OK) {
-            ALOGE("allocate_buffer_with_backup failed");
-            return err;
-        }
-
-        if (mem != NULL) {
-            info.mData = mem->pointer();
-        }
-
-        info.mBuffer = buffer;
-        info.mStatus = OWNED_BY_US;
-        info.mMem = mem;
-        info.mMediaBuffer = NULL;
-
-        if (portIndex == kPortIndexOutput) {
-            // Fail deferred MediaBuffer creation until FILL_BUFFER_DONE;
-            // this legacy mode is no longer supported.
-            LOG_ALWAYS_FATAL_IF((mOMXLivesLocally
-                    && (mQuirks & kRequiresAllocateBufferOnOutputPorts)
-                    && (mQuirks & kDefersOutputBufferAllocation)),
-                    "allocateBuffersOnPort cannot defer buffer allocation");
-
-            info.mMediaBuffer = new MediaBuffer(info.mData, info.mSize);
-            info.mMediaBuffer->setObserver(this);
-        }
-
-        mPortBuffers[portIndex].push(info);
-
-        CODEC_LOGV("allocated buffer %u on %s port", buffer,
-             portIndex == kPortIndexInput ? "input" : "output");
-    }
-
-    if (portIndex == kPortIndexOutput) {
-
-        sp<MetaData> meta = mSource->getFormat();
-        int32_t delay = 0;
-        if (!meta->findInt32(kKeyEncoderDelay, &delay)) {
-            delay = 0;
-        }
-        int32_t padding = 0;
-        if (!meta->findInt32(kKeyEncoderPadding, &padding)) {
-            padding = 0;
-        }
-        int32_t numchannels = 0;
-        if (delay + padding) {
-            if (mOutputFormat->findInt32(kKeyChannelCount, &numchannels)) {
-                size_t frameSize = numchannels * sizeof(int16_t);
-                if (mSkipCutBuffer != NULL) {
-                    size_t prevbuffersize = mSkipCutBuffer->size();
-                    if (prevbuffersize != 0) {
-                        ALOGW("Replacing SkipCutBuffer holding %zu bytes", prevbuffersize);
-                    }
-                }
-                mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize);
-            }
-        }
-    }
-
-    // dumpPortStatus(portIndex);
-
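-    // With secure input buffers the upstream source must write directly
-    // into the codec's buffers, so hand our input buffer list to it.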
-    if (portIndex == kPortIndexInput && (mFlags & kUseSecureInputBuffers)) {
-        Vector<MediaBuffer *> buffers;
-        for (size_t i = 0; i < def.nBufferCountActual; ++i) {
-            const BufferInfo &info = mPortBuffers[kPortIndexInput].itemAt(i);
-
-            MediaBuffer *mbuf = new MediaBuffer(info.mData, info.mSize);
-            buffers.push(mbuf);
-        }
-
-        status_t err = mSource->setBuffers(buffers);
-
-        if (err != OK) {
-            for (size_t i = 0; i < def.nBufferCountActual; ++i) {
-                buffers.editItemAt(i)->release();
-            }
-            buffers.clear();
-
-            CODEC_LOGE(
-                    "Codec requested to use secure input buffers but "
-                    "upstream source didn't support that.");
-
-            return err;
-        }
-    }
-
-    return OK;
-}
-
-status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
-    // Get the number of buffers needed.
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = kPortIndexOutput;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    if (err != OK) {
-        CODEC_LOGE("getParameter failed: %d", err);
-        return err;
-    }
-
-    sp<MetaData> meta = mSource->getFormat();
-
-    int32_t rotationDegrees;
-    if (!meta->findInt32(kKeyRotation, &rotationDegrees)) {
-        rotationDegrees = 0;
-    }
-
-    // Set up the native window.
-    OMX_U32 usage = 0;
-    err = mOMX->getGraphicBufferUsage(mNode, kPortIndexOutput, &usage);
-    if (err != 0) {
-        ALOGW("querying usage flags from OMX IL component failed: %d", err);
-        // XXX: Currently this error is logged, but not fatal.
-        usage = 0;
-    }
-
-    if (mFlags & kEnableGrallocUsageProtected) {
-        usage |= GRALLOC_USAGE_PROTECTED;
-    }
-
-    err = setNativeWindowSizeFormatAndUsage(
-            mNativeWindow.get(),
-            def.format.video.nFrameWidth,
-            def.format.video.nFrameHeight,
-            def.format.video.eColorFormat,
-            rotationDegrees,
-            usage | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP);
-    if (err != 0) {
-        return err;
-    }
-
-    int minUndequeuedBufs = 0;
-    err = mNativeWindow->query(mNativeWindow.get(),
-            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs);
-    if (err != 0) {
-        ALOGE("NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS query failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-    // FIXME: this assumes the surface is controlled by the app (the native
-    // window reports the value for the case where it is not app-controlled).
-    // FIXME2: This means minUndequeuedBufs can be 1 larger than reported.
-    // For now, try to allocate 1 more buffer, but don't fail if unsuccessful.
-
-    // Use conservative allocation while also trying to reduce starvation
-    //
-    // 1. allocate at least nBufferCountMin + minUndequeuedBuffers - that is the
-    //    minimum needed for the consumer to be able to work
-    // 2. try to allocate two (2) additional buffers to reduce starvation from
-    //    the consumer
-    //    plus an extra buffer to account for incorrect minUndequeuedBufs
-    CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
-            def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
-
-    for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
-        OMX_U32 newBufferCount =
-            def.nBufferCountMin + minUndequeuedBufs + extraBuffers;
-        def.nBufferCountActual = newBufferCount;
-        err = mOMX->setParameter(
-                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-
-        if (err == OK) {
-            minUndequeuedBufs += extraBuffers;
-            break;
-        }
-
-        CODEC_LOGW("setting nBufferCountActual to %u failed: %d",
-                newBufferCount, err);
-        /* exit condition */
-        if (extraBuffers == 0) {
-            return err;
-        }
-    }
-    CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
-            def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
-
-    err = native_window_set_buffer_count(
-            mNativeWindow.get(), def.nBufferCountActual);
-    if (err != 0) {
-        ALOGE("native_window_set_buffer_count failed: %s (%d)", strerror(-err),
-                -err);
-        return err;
-    }
-
-    CODEC_LOGV("allocating %u buffers from a native window of size %u on "
-            "output port", def.nBufferCountActual, def.nBufferSize);
-
-    // Dequeue buffers and send them to OMX
-    for (OMX_U32 i = 0; i < def.nBufferCountActual; i++) {
-        ANativeWindowBuffer* buf;
-        err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &buf);
-        if (err != 0) {
-            ALOGE("dequeueBuffer failed: %s (%d)", strerror(-err), -err);
-            break;
-        }
-
-        sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
-        BufferInfo info;
-        info.mData = NULL;
-        info.mSize = def.nBufferSize;
-        info.mStatus = OWNED_BY_US;
-        info.mMem = NULL;
-        info.mMediaBuffer = new MediaBuffer(graphicBuffer);
-        info.mMediaBuffer->setObserver(this);
-        mPortBuffers[kPortIndexOutput].push(info);
-
-        IOMX::buffer_id bufferId;
-        err = mOMX->useGraphicBuffer(mNode, kPortIndexOutput, graphicBuffer,
-                &bufferId);
-        if (err != 0) {
-            CODEC_LOGE("registering GraphicBuffer with OMX IL component "
-                    "failed: %d", err);
-            break;
-        }
-
-        mPortBuffers[kPortIndexOutput].editItemAt(i).mBuffer = bufferId;
-
-        CODEC_LOGV("registered graphic buffer with ID %u (pointer = %p)",
-                bufferId, graphicBuffer.get());
-    }
-
-    OMX_U32 cancelStart;
-    OMX_U32 cancelEnd;
-    if (err != 0) {
-        // If an error occurred while dequeuing we need to cancel any buffers
-        // that were dequeued.
-        cancelStart = 0;
-        cancelEnd = mPortBuffers[kPortIndexOutput].size();
-    } else {
-        // Return the last minUndequeuedBufs buffers to the native window.
-        cancelStart = def.nBufferCountActual - minUndequeuedBufs;
-        cancelEnd = def.nBufferCountActual;
-    }
-
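-    // cancelBuffer() returns ownership of a dequeued buffer to the native
-    // window without queueing it for display.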
-    for (OMX_U32 i = cancelStart; i < cancelEnd; i++) {
-        BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(i);
-        cancelBufferToNativeWindow(info);
-    }
-
-    return err;
-}
-
-status_t OMXCodec::cancelBufferToNativeWindow(BufferInfo *info) {
-    CHECK_EQ((int)info->mStatus, (int)OWNED_BY_US);
-    CODEC_LOGV("Calling cancelBuffer on buffer %u", info->mBuffer);
-    int err = mNativeWindow->cancelBuffer(
-        mNativeWindow.get(), info->mMediaBuffer->graphicBuffer().get(), -1);
-    if (err != 0) {
-      CODEC_LOGE("cancelBuffer failed w/ error 0x%08x", err);
-
-      setState(ERROR);
-      return err;
-    }
-    info->mStatus = OWNED_BY_NATIVE_WINDOW;
-    return OK;
-}
-
-OMXCodec::BufferInfo* OMXCodec::dequeueBufferFromNativeWindow() {
-    // Dequeue the next buffer from the native window.
-    ANativeWindowBuffer* buf;
-    int err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &buf);
-    if (err != 0) {
-      CODEC_LOGE("dequeueBuffer failed w/ error 0x%08x", err);
-
-      setState(ERROR);
-      return 0;
-    }
-
-    // Determine which buffer we just dequeued.
-    Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
-    BufferInfo *bufInfo = 0;
-    for (size_t i = 0; i < buffers->size(); i++) {
-        sp<GraphicBuffer> graphicBuffer = buffers->itemAt(i).
-            mMediaBuffer->graphicBuffer();
-        if (graphicBuffer->handle == buf->handle) {
-            bufInfo = &buffers->editItemAt(i);
-            break;
-        }
-    }
-
-    if (bufInfo == 0) {
-        CODEC_LOGE("dequeued unrecognized buffer: %p", buf);
-
-        setState(ERROR);
-        return 0;
-    }
-
-    // The native window no longer owns the buffer.
-    CHECK_EQ((int)bufInfo->mStatus, (int)OWNED_BY_NATIVE_WINDOW);
-    bufInfo->mStatus = OWNED_BY_US;
-
-    return bufInfo;
-}
-
-int64_t OMXCodec::getDecodingTimeUs() {
-    CHECK(mIsEncoder && mIsVideo);
-
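-    // Input frame timestamps are queued in drainInputBuffer() and popped
-    // FIFO here, pairing each encoded output buffer with the decoding time
-    // of the frame it was produced from.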
-    if (mDecodingTimeList.empty()) {
-        CHECK(mSignalledEOS || mNoMoreOutputData);
-        // No corresponding input frame available.
-        // This could happen when EOS is reached.
-        return 0;
-    }
-
-    List<int64_t>::iterator it = mDecodingTimeList.begin();
-    int64_t timeUs = *it;
-    mDecodingTimeList.erase(it);
-    return timeUs;
-}
-
-void OMXCodec::on_message(const omx_message &msg) {
-    if (mState == ERROR) {
-        /*
-         * only drop EVENT messages, EBD and FBD are still
-         * processed for bookkeeping purposes
-         */
-        if (msg.type == omx_message::EVENT) {
-            ALOGW("Dropping OMX EVENT message - we're in ERROR state.");
-            return;
-        }
-    }
-
-    switch (msg.type) {
-        case omx_message::EVENT:
-        {
-            onEvent(
-                 msg.u.event_data.event, msg.u.event_data.data1,
-                 msg.u.event_data.data2);
-
-            break;
-        }
-
-        case omx_message::EMPTY_BUFFER_DONE:
-        {
-            IOMX::buffer_id buffer = msg.u.extended_buffer_data.buffer;
-
-            CODEC_LOGV("EMPTY_BUFFER_DONE(buffer: %u)", buffer);
-
-            Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
-            size_t i = 0;
-            while (i < buffers->size() && (*buffers)[i].mBuffer != buffer) {
-                ++i;
-            }
-
-            CHECK(i < buffers->size());
-            if ((*buffers)[i].mStatus != OWNED_BY_COMPONENT) {
-                ALOGW("We already own input buffer %u, yet received "
-                     "an EMPTY_BUFFER_DONE.", buffer);
-            }
-
-            BufferInfo* info = &buffers->editItemAt(i);
-            info->mStatus = OWNED_BY_US;
-
-            // The buffer can't be released until EMPTY_BUFFER_DONE has been received.
-            if (info->mMediaBuffer != NULL) {
-                info->mMediaBuffer->release();
-                info->mMediaBuffer = NULL;
-            }
-
-            if (mPortStatus[kPortIndexInput] == DISABLING) {
-                CODEC_LOGV("Port is disabled, freeing buffer %u", buffer);
-
-                status_t err = freeBuffer(kPortIndexInput, i);
-                CHECK_EQ(err, (status_t)OK);
-            } else if (mState != ERROR
-                    && mPortStatus[kPortIndexInput] != SHUTTING_DOWN) {
-                CHECK_EQ((int)mPortStatus[kPortIndexInput], (int)ENABLED);
-
-                if (mFlags & kUseSecureInputBuffers) {
-                    drainAnyInputBuffer();
-                } else {
-                    drainInputBuffer(&buffers->editItemAt(i));
-                }
-            }
-            break;
-        }
-
-        case omx_message::FILL_BUFFER_DONE:
-        {
-            IOMX::buffer_id buffer = msg.u.extended_buffer_data.buffer;
-            OMX_U32 flags = msg.u.extended_buffer_data.flags;
-
-            CODEC_LOGV("FILL_BUFFER_DONE(buffer: %u, size: %u, flags: 0x%08x, timestamp: %lld us (%.2f secs))",
-                 buffer,
-                 msg.u.extended_buffer_data.range_length,
-                 flags,
-                 msg.u.extended_buffer_data.timestamp,
-                 msg.u.extended_buffer_data.timestamp / 1E6);
-
-            Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
-            size_t i = 0;
-            while (i < buffers->size() && (*buffers)[i].mBuffer != buffer) {
-                ++i;
-            }
-
-            CHECK(i < buffers->size());
-            BufferInfo *info = &buffers->editItemAt(i);
-
-            if (info->mStatus != OWNED_BY_COMPONENT) {
-                ALOGW("We already own output buffer %u, yet received "
-                     "a FILL_BUFFER_DONE.", buffer);
-            }
-
-            info->mStatus = OWNED_BY_US;
-
-            if (mPortStatus[kPortIndexOutput] == DISABLING) {
-                CODEC_LOGV("Port is disabled, freeing buffer %u", buffer);
-
-                status_t err = freeBuffer(kPortIndexOutput, i);
-                CHECK_EQ(err, (status_t)OK);
-
-#if 0
-            } else if (mPortStatus[kPortIndexOutput] == ENABLED
-                       && (flags & OMX_BUFFERFLAG_EOS)) {
-                CODEC_LOGV("No more output data.");
-                mNoMoreOutputData = true;
-                mBufferFilled.signal();
-#endif
-            } else if (mPortStatus[kPortIndexOutput] != SHUTTING_DOWN) {
-                CHECK_EQ((int)mPortStatus[kPortIndexOutput], (int)ENABLED);
-
-                MediaBuffer *buffer = info->mMediaBuffer;
-                bool isGraphicBuffer = buffer->graphicBuffer() != NULL;
-
-                if (!isGraphicBuffer
-                    && msg.u.extended_buffer_data.range_offset
-                        + msg.u.extended_buffer_data.range_length
-                            > buffer->size()) {
-                    CODEC_LOGE(
-                            "Codec lied about its buffer size requirements, "
-                            "sending a buffer larger than the originally "
-                            "advertised size in FILL_BUFFER_DONE!");
-                }
-                buffer->set_range(
-                        msg.u.extended_buffer_data.range_offset,
-                        msg.u.extended_buffer_data.range_length);
-
-                buffer->meta_data()->clear();
-
-                buffer->meta_data()->setInt64(
-                        kKeyTime, msg.u.extended_buffer_data.timestamp);
-
-                if (msg.u.extended_buffer_data.flags & OMX_BUFFERFLAG_SYNCFRAME) {
-                    buffer->meta_data()->setInt32(kKeyIsSyncFrame, true);
-                }
-                bool isCodecSpecific = false;
-                if (msg.u.extended_buffer_data.flags & OMX_BUFFERFLAG_CODECCONFIG) {
-                    buffer->meta_data()->setInt32(kKeyIsCodecConfig, true);
-                    isCodecSpecific = true;
-                }
-
-                if (isGraphicBuffer || mQuirks & kOutputBuffersAreUnreadable) {
-                    buffer->meta_data()->setInt32(kKeyIsUnreadable, true);
-                }
-
-                buffer->meta_data()->setInt32(
-                        kKeyBufferID,
-                        msg.u.extended_buffer_data.buffer);
-
-                if (msg.u.extended_buffer_data.flags & OMX_BUFFERFLAG_EOS) {
-                    CODEC_LOGV("No more output data.");
-                    mNoMoreOutputData = true;
-                }
-
-                if (mIsEncoder && mIsVideo) {
-                    int64_t decodingTimeUs = isCodecSpecific ? 0 : getDecodingTimeUs();
-                    buffer->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs);
-                }
-
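-                // mTargetTimeUs is taken from the source's kKeyTargetTime
-                // (e.g. for SEEK_CLOSEST seeks): frames decoded before the
-                // target timestamp are recycled back to the component
-                // instead of being returned to the caller.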
-                if (mTargetTimeUs >= 0) {
-                    CHECK(msg.u.extended_buffer_data.timestamp <= mTargetTimeUs);
-
-                    if (msg.u.extended_buffer_data.timestamp < mTargetTimeUs) {
-                        CODEC_LOGV(
-                                "skipping output buffer at timestamp %lld us",
-                                msg.u.extended_buffer_data.timestamp);
-
-                        fillOutputBuffer(info);
-                        break;
-                    }
-
-                    CODEC_LOGV(
-                            "returning output buffer at target timestamp "
-                            "%lld us",
-                            msg.u.extended_buffer_data.timestamp);
-
-                    mTargetTimeUs = -1;
-                }
-
-                mFilledBuffers.push_back(i);
-                mBufferFilled.signal();
-                if (mIsEncoder) {
-                    sched_yield();
-                }
-            }
-
-            break;
-        }
-
-        default:
-        {
-            CHECK(!"should not be here.");
-            break;
-        }
-    }
-}
-
-// Has the format changed in any way that the client would have to be aware of?
-static bool formatHasNotablyChanged(
-        const sp<MetaData> &from, const sp<MetaData> &to) {
-    if (from.get() == NULL && to.get() == NULL) {
-        return false;
-    }
-
-    if ((from.get() == NULL && to.get() != NULL)
-        || (from.get() != NULL && to.get() == NULL)) {
-        return true;
-    }
-
-    const char *mime_from, *mime_to;
-    CHECK(from->findCString(kKeyMIMEType, &mime_from));
-    CHECK(to->findCString(kKeyMIMEType, &mime_to));
-
-    if (strcasecmp(mime_from, mime_to)) {
-        return true;
-    }
-
-    if (!strcasecmp(mime_from, MEDIA_MIMETYPE_VIDEO_RAW)) {
-        int32_t colorFormat_from, colorFormat_to;
-        CHECK(from->findInt32(kKeyColorFormat, &colorFormat_from));
-        CHECK(to->findInt32(kKeyColorFormat, &colorFormat_to));
-
-        if (colorFormat_from != colorFormat_to) {
-            return true;
-        }
-
-        int32_t width_from, width_to;
-        CHECK(from->findInt32(kKeyWidth, &width_from));
-        CHECK(to->findInt32(kKeyWidth, &width_to));
-
-        if (width_from != width_to) {
-            return true;
-        }
-
-        int32_t height_from, height_to;
-        CHECK(from->findInt32(kKeyHeight, &height_from));
-        CHECK(to->findInt32(kKeyHeight, &height_to));
-
-        if (height_from != height_to) {
-            return true;
-        }
-
-        int32_t left_from, top_from, right_from, bottom_from;
-        CHECK(from->findRect(
-                    kKeyCropRect,
-                    &left_from, &top_from, &right_from, &bottom_from));
-
-        int32_t left_to, top_to, right_to, bottom_to;
-        CHECK(to->findRect(
-                    kKeyCropRect,
-                    &left_to, &top_to, &right_to, &bottom_to));
-
-        if (left_to != left_from || top_to != top_from
-                || right_to != right_from || bottom_to != bottom_from) {
-            return true;
-        }
-    } else if (!strcasecmp(mime_from, MEDIA_MIMETYPE_AUDIO_RAW)) {
-        int32_t numChannels_from, numChannels_to;
-        CHECK(from->findInt32(kKeyChannelCount, &numChannels_from));
-        CHECK(to->findInt32(kKeyChannelCount, &numChannels_to));
-
-        if (numChannels_from != numChannels_to) {
-            return true;
-        }
-
-        int32_t sampleRate_from, sampleRate_to;
-        CHECK(from->findInt32(kKeySampleRate, &sampleRate_from));
-        CHECK(to->findInt32(kKeySampleRate, &sampleRate_to));
-
-        if (sampleRate_from != sampleRate_to) {
-            return true;
-        }
-    }
-
-    return false;
-}
-
-void OMXCodec::onEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
-    switch (event) {
-        case OMX_EventCmdComplete:
-        {
-            onCmdComplete((OMX_COMMANDTYPE)data1, data2);
-            break;
-        }
-
-        case OMX_EventError:
-        {
-            CODEC_LOGE("OMX_EventError(0x%08x, %u)", data1, data2);
-
-            setState(ERROR);
-            break;
-        }
-
-        case OMX_EventPortSettingsChanged:
-        {
-            CODEC_LOGV("OMX_EventPortSettingsChanged(port=%u, data2=0x%08x)",
-                       data1, data2);
-
-            if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
-                onPortSettingsChanged(data1);
-            } else if (data1 == kPortIndexOutput &&
-                        (data2 == OMX_IndexConfigCommonOutputCrop ||
-                         data2 == OMX_IndexConfigCommonScale)) {
-
-                sp<MetaData> oldOutputFormat = mOutputFormat;
-                initOutputFormat(mSource->getFormat());
-
-                if (data2 == OMX_IndexConfigCommonOutputCrop &&
-                    formatHasNotablyChanged(oldOutputFormat, mOutputFormat)) {
-                    mOutputPortSettingsHaveChanged = true;
-
-                } else if (data2 == OMX_IndexConfigCommonScale) {
-                    OMX_CONFIG_SCALEFACTORTYPE scale;
-                    InitOMXParams(&scale);
-                    scale.nPortIndex = kPortIndexOutput;
-
-                    // Change display dimension only when necessary.
-                    if (OK == mOMX->getConfig(
-                                        mNode,
-                                        OMX_IndexConfigCommonScale,
-                                        &scale, sizeof(scale))) {
-                        int32_t left, top, right, bottom;
-                        CHECK(mOutputFormat->findRect(kKeyCropRect,
-                                                      &left, &top,
-                                                      &right, &bottom));
-
-                        // The scale is in 16.16 format.
-                        // scale 1.0 = 0x010000. When there is no
-                        // need to change the display, skip it.
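-                        // e.g. xWidth == 0x018000 means 1.5x, so a 640
-                        // pixel wide crop is displayed 960 pixels wide.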
-                        ALOGV("Get OMX_IndexConfigScale: 0x%x/0x%x",
-                                scale.xWidth, scale.xHeight);
-
-                        if (scale.xWidth != 0x010000) {
-                            mOutputFormat->setInt32(kKeyDisplayWidth,
-                                    ((right - left +  1) * scale.xWidth)  >> 16);
-                            mOutputPortSettingsHaveChanged = true;
-                        }
-
-                        if (scale.xHeight != 0x010000) {
-                            mOutputFormat->setInt32(kKeyDisplayHeight,
-                                    ((bottom  - top + 1) * scale.xHeight) >> 16);
-                            mOutputPortSettingsHaveChanged = true;
-                        }
-                    }
-                }
-            }
-            break;
-        }
-
-#if 0
-        case OMX_EventBufferFlag:
-        {
-            CODEC_LOGV("EVENT_BUFFER_FLAG(%ld)", data1);
-
-            if (data1 == kPortIndexOutput) {
-                mNoMoreOutputData = true;
-            }
-            break;
-        }
-#endif
-
-        default:
-        {
-            CODEC_LOGV("EVENT(%d, %u, %u)", event, data1, data2);
-            break;
-        }
-    }
-}
-
-void OMXCodec::onCmdComplete(OMX_COMMANDTYPE cmd, OMX_U32 data) {
-    switch (cmd) {
-        case OMX_CommandStateSet:
-        {
-            onStateChange((OMX_STATETYPE)data);
-            break;
-        }
-
-        case OMX_CommandPortDisable:
-        {
-            OMX_U32 portIndex = data;
-            CODEC_LOGV("PORT_DISABLED(%u)", portIndex);
-
-            CHECK(mState == EXECUTING || mState == RECONFIGURING);
-            CHECK_EQ((int)mPortStatus[portIndex], (int)DISABLING);
-            CHECK_EQ(mPortBuffers[portIndex].size(), 0u);
-
-            mPortStatus[portIndex] = DISABLED;
-
-            if (mState == RECONFIGURING) {
-                CHECK_EQ(portIndex, (OMX_U32)kPortIndexOutput);
-
-                sp<MetaData> oldOutputFormat = mOutputFormat;
-                initOutputFormat(mSource->getFormat());
-
-                // Don't notify clients if the output port settings change
-                // isn't of importance to them, e.g. when only the number of
-                // buffers has changed and nothing else.
-                bool formatChanged = formatHasNotablyChanged(oldOutputFormat, mOutputFormat);
-                if (!mOutputPortSettingsHaveChanged) {
-                    mOutputPortSettingsHaveChanged = formatChanged;
-                }
-
-                status_t err = enablePortAsync(portIndex);
-                if (err != OK) {
-                    CODEC_LOGE("enablePortAsync(%u) failed (err = %d)", portIndex, err);
-                    setState(ERROR);
-                } else {
-                    err = allocateBuffersOnPort(portIndex);
-                    if (err != OK) {
-                        CODEC_LOGE("allocateBuffersOnPort (%s) failed "
-                                   "(err = %d)",
-                                   portIndex == kPortIndexInput
-                                        ? "input" : "output",
-                                   err);
-
-                        setState(ERROR);
-                    }
-                }
-            }
-            break;
-        }
-
-        case OMX_CommandPortEnable:
-        {
-            OMX_U32 portIndex = data;
-            CODEC_LOGV("PORT_ENABLED(%u)", portIndex);
-
-            CHECK(mState == EXECUTING || mState == RECONFIGURING);
-            CHECK_EQ((int)mPortStatus[portIndex], (int)ENABLING);
-
-            mPortStatus[portIndex] = ENABLED;
-
-            if (mState == RECONFIGURING) {
-                CHECK_EQ(portIndex, (OMX_U32)kPortIndexOutput);
-
-                setState(EXECUTING);
-
-                fillOutputBuffers();
-            }
-            break;
-        }
-
-        case OMX_CommandFlush:
-        {
-            OMX_U32 portIndex = data;
-
-            CODEC_LOGV("FLUSH_DONE(%u)", portIndex);
-
-            CHECK_EQ((int)mPortStatus[portIndex], (int)SHUTTING_DOWN);
-            mPortStatus[portIndex] = ENABLED;
-
-            CHECK_EQ(countBuffersWeOwn(mPortBuffers[portIndex]),
-                     mPortBuffers[portIndex].size());
-
-            if (mSkipCutBuffer != NULL && mPortStatus[kPortIndexOutput] == ENABLED) {
-                mSkipCutBuffer->clear();
-            }
-
-            if (mState == RECONFIGURING) {
-                CHECK_EQ(portIndex, (OMX_U32)kPortIndexOutput);
-
-                disablePortAsync(portIndex);
-            } else if (mState == EXECUTING_TO_IDLE) {
-                if (mPortStatus[kPortIndexInput] == ENABLED
-                    && mPortStatus[kPortIndexOutput] == ENABLED) {
-                    CODEC_LOGV("Finished flushing both ports, now completing "
-                         "transition from EXECUTING to IDLE.");
-
-                    mPortStatus[kPortIndexInput] = SHUTTING_DOWN;
-                    mPortStatus[kPortIndexOutput] = SHUTTING_DOWN;
-
-                    status_t err =
-                        mOMX->sendCommand(mNode, OMX_CommandStateSet, OMX_StateIdle);
-                    CHECK_EQ(err, (status_t)OK);
-                }
-            } else {
-                // We're flushing both ports in preparation for seeking.
-
-                if (mPortStatus[kPortIndexInput] == ENABLED
-                    && mPortStatus[kPortIndexOutput] == ENABLED) {
-                    CODEC_LOGV("Finished flushing both ports, now continuing from"
-                         " seek-time.");
-
-                    // We implicitly resume pulling on our upstream source.
-                    mPaused = false;
-
-                    drainInputBuffers();
-                    fillOutputBuffers();
-                }
-
-                if (mOutputPortSettingsChangedPending) {
-                    CODEC_LOGV(
-                            "Honoring deferred output port settings change.");
-
-                    mOutputPortSettingsChangedPending = false;
-                    onPortSettingsChanged(kPortIndexOutput);
-                }
-            }
-
-            break;
-        }
-
-        default:
-        {
-            CODEC_LOGV("CMD_COMPLETE(%d, %u)", cmd, data);
-            break;
-        }
-    }
-}
-
-void OMXCodec::onStateChange(OMX_STATETYPE newState) {
-    CODEC_LOGV("onStateChange %d", newState);
-
-    switch (newState) {
-        case OMX_StateIdle:
-        {
-            CODEC_LOGV("Now Idle.");
-            if (mState == LOADED_TO_IDLE) {
-                status_t err = mOMX->sendCommand(
-                        mNode, OMX_CommandStateSet, OMX_StateExecuting);
-
-                CHECK_EQ(err, (status_t)OK);
-
-                setState(IDLE_TO_EXECUTING);
-            } else {
-                CHECK_EQ((int)mState, (int)EXECUTING_TO_IDLE);
-
-                if (countBuffersWeOwn(mPortBuffers[kPortIndexInput]) !=
-                    mPortBuffers[kPortIndexInput].size()) {
-                    ALOGE("Codec did not return all input buffers "
-                          "(received %zu / %zu)",
-                            countBuffersWeOwn(mPortBuffers[kPortIndexInput]),
-                            mPortBuffers[kPortIndexInput].size());
-                    TRESPASS();
-                }
-
-                if (countBuffersWeOwn(mPortBuffers[kPortIndexOutput]) !=
-                    mPortBuffers[kPortIndexOutput].size()) {
-                    ALOGE("Codec did not return all output buffers "
-                          "(received %zu / %zu)",
-                            countBuffersWeOwn(mPortBuffers[kPortIndexOutput]),
-                            mPortBuffers[kPortIndexOutput].size());
-                    TRESPASS();
-                }
-
-                status_t err = mOMX->sendCommand(
-                        mNode, OMX_CommandStateSet, OMX_StateLoaded);
-
-                CHECK_EQ(err, (status_t)OK);
-
-                err = freeBuffersOnPort(kPortIndexInput);
-                CHECK_EQ(err, (status_t)OK);
-
-                err = freeBuffersOnPort(kPortIndexOutput);
-                CHECK_EQ(err, (status_t)OK);
-
-                mPortStatus[kPortIndexInput] = ENABLED;
-                mPortStatus[kPortIndexOutput] = ENABLED;
-
-                if ((mFlags & kEnableGrallocUsageProtected) &&
-                        mNativeWindow != NULL) {
-                    // We push enough 1x1 blank buffers to ensure that one of
-                    // them has made it to the display.  This allows the OMX
-                    // component teardown to zero out any protected buffers
-                    // without the risk of scanning out one of those buffers.
-                    pushBlankBuffersToNativeWindow(mNativeWindow.get());
-                }
-
-                setState(IDLE_TO_LOADED);
-            }
-            break;
-        }
-
-        case OMX_StateExecuting:
-        {
-            CHECK_EQ((int)mState, (int)IDLE_TO_EXECUTING);
-
-            CODEC_LOGV("Now Executing.");
-
-            mOutputPortSettingsChangedPending = false;
-
-            setState(EXECUTING);
-
-            // Buffers will be submitted to the component in the first
-            // call to OMXCodec::read, as mInitialBufferSubmit is true at
-            // this point. This ensures that this on_message call returns
-            // and releases the lock, so that ::init can notice the state
-            // change and itself return.
-            break;
-        }
-
-        case OMX_StateLoaded:
-        {
-            CHECK_EQ((int)mState, (int)IDLE_TO_LOADED);
-
-            CODEC_LOGV("Now Loaded.");
-
-            setState(LOADED);
-            break;
-        }
-
-        case OMX_StateInvalid:
-        {
-            setState(ERROR);
-            break;
-        }
-
-        default:
-        {
-            CHECK(!"should not be here.");
-            break;
-        }
-    }
-}
-
-// static
-size_t OMXCodec::countBuffersWeOwn(const Vector<BufferInfo> &buffers) {
-    size_t n = 0;
-    for (size_t i = 0; i < buffers.size(); ++i) {
-        if (buffers[i].mStatus != OWNED_BY_COMPONENT) {
-            ++n;
-        }
-    }
-
-    return n;
-}
-
-status_t OMXCodec::freeBuffersOnPort(
-        OMX_U32 portIndex, bool onlyThoseWeOwn) {
-    Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
-
-    status_t stickyErr = OK;
-
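-    // Iterate backwards: a successful freeBuffer() removes the entry at
-    // that index, which would otherwise shift the indices still to visit.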
-    for (size_t i = buffers->size(); i-- > 0;) {
-        BufferInfo *info = &buffers->editItemAt(i);
-
-        if (onlyThoseWeOwn && info->mStatus == OWNED_BY_COMPONENT) {
-            continue;
-        }
-
-        CHECK(info->mStatus == OWNED_BY_US
-                || info->mStatus == OWNED_BY_NATIVE_WINDOW);
-
-        CODEC_LOGV("freeing buffer %u on port %u", info->mBuffer, portIndex);
-
-        status_t err = freeBuffer(portIndex, i);
-
-        if (err != OK) {
-            stickyErr = err;
-        }
-
-    }
-
-    CHECK(onlyThoseWeOwn || buffers->isEmpty());
-
-    return stickyErr;
-}
-
-status_t OMXCodec::freeBuffer(OMX_U32 portIndex, size_t bufIndex) {
-    Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
-
-    BufferInfo *info = &buffers->editItemAt(bufIndex);
-
-    status_t err = mOMX->freeBuffer(mNode, portIndex, info->mBuffer);
-
-    if (err == OK && info->mMediaBuffer != NULL) {
-        CHECK_EQ(portIndex, (OMX_U32)kPortIndexOutput);
-        info->mMediaBuffer->setObserver(NULL);
-
-        // Make sure nobody but us owns this buffer at this point.
-        CHECK_EQ(info->mMediaBuffer->refcount(), 0);
-
-        // Cancel the buffer if it belongs to an ANativeWindow.
-        sp<GraphicBuffer> graphicBuffer = info->mMediaBuffer->graphicBuffer();
-        if (info->mStatus == OWNED_BY_US && graphicBuffer != 0) {
-            err = cancelBufferToNativeWindow(info);
-        }
-
-        info->mMediaBuffer->release();
-        info->mMediaBuffer = NULL;
-    }
-
-    if (err == OK) {
-        buffers->removeAt(bufIndex);
-    }
-
-    return err;
-}
-
-void OMXCodec::onPortSettingsChanged(OMX_U32 portIndex) {
-    CODEC_LOGV("PORT_SETTINGS_CHANGED(%u)", portIndex);
-
-    CHECK(mState == EXECUTING || mState == EXECUTING_TO_IDLE);
-    CHECK_EQ(portIndex, (OMX_U32)kPortIndexOutput);
-    CHECK(!mOutputPortSettingsChangedPending);
-
-    if (mPortStatus[kPortIndexOutput] != ENABLED) {
-        CODEC_LOGV("Deferring output port settings change.");
-        mOutputPortSettingsChangedPending = true;
-        return;
-    }
-
-    setState(RECONFIGURING);
-
-    if (mQuirks & kNeedsFlushBeforeDisable) {
-        if (!flushPortAsync(portIndex)) {
-            onCmdComplete(OMX_CommandFlush, portIndex);
-        }
-    } else {
-        disablePortAsync(portIndex);
-    }
-}
-
-bool OMXCodec::flushPortAsync(OMX_U32 portIndex) {
-    CHECK(mState == EXECUTING || mState == RECONFIGURING
-            || mState == EXECUTING_TO_IDLE);
-
-    CODEC_LOGV("flushPortAsync(%u): we own %zu out of %zu buffers already.",
-         portIndex, countBuffersWeOwn(mPortBuffers[portIndex]),
-         mPortBuffers[portIndex].size());
-
-    CHECK_EQ((int)mPortStatus[portIndex], (int)ENABLED);
-    mPortStatus[portIndex] = SHUTTING_DOWN;
-
-    if ((mQuirks & kRequiresFlushCompleteEmulation)
-        && countBuffersWeOwn(mPortBuffers[portIndex])
-                == mPortBuffers[portIndex].size()) {
-        // No flush is necessary and this component fails to send a
-        // flush-complete event in this case.
-
-        return false;
-    }
-
-    status_t err =
-        mOMX->sendCommand(mNode, OMX_CommandFlush, portIndex);
-    CHECK_EQ(err, (status_t)OK);
-
-    return true;
-}
-
-void OMXCodec::disablePortAsync(OMX_U32 portIndex) {
-    CHECK(mState == EXECUTING || mState == RECONFIGURING);
-
-    CHECK_EQ((int)mPortStatus[portIndex], (int)ENABLED);
-    mPortStatus[portIndex] = DISABLING;
-
-    CODEC_LOGV("sending OMX_CommandPortDisable(%u)", portIndex);
-    status_t err =
-        mOMX->sendCommand(mNode, OMX_CommandPortDisable, portIndex);
-    CHECK_EQ(err, (status_t)OK);
-
-    freeBuffersOnPort(portIndex, true);
-}
-
-status_t OMXCodec::enablePortAsync(OMX_U32 portIndex) {
-    CHECK(mState == EXECUTING || mState == RECONFIGURING);
-
-    CHECK_EQ((int)mPortStatus[portIndex], (int)DISABLED);
-    mPortStatus[portIndex] = ENABLING;
-
-    CODEC_LOGV("sending OMX_CommandPortEnable(%u)", portIndex);
-    return mOMX->sendCommand(mNode, OMX_CommandPortEnable, portIndex);
-}
-
-void OMXCodec::fillOutputBuffers() {
-    CHECK_EQ((int)mState, (int)EXECUTING);
-
-    // This is a workaround for some decoders not properly reporting
-    // end-of-output-stream. If we own all input buffers and also own
-    // all output buffers and we already signalled end-of-input-stream,
-    // the end-of-output-stream is implied.
-    if (mSignalledEOS
-            && countBuffersWeOwn(mPortBuffers[kPortIndexInput])
-                == mPortBuffers[kPortIndexInput].size()
-            && countBuffersWeOwn(mPortBuffers[kPortIndexOutput])
-                == mPortBuffers[kPortIndexOutput].size()) {
-        mNoMoreOutputData = true;
-        mBufferFilled.signal();
-
-        return;
-    }
-
-    Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
-    for (size_t i = 0; i < buffers->size(); ++i) {
-        BufferInfo *info = &buffers->editItemAt(i);
-        if (info->mStatus == OWNED_BY_US) {
-            fillOutputBuffer(info);
-        }
-    }
-}
-
-void OMXCodec::drainInputBuffers() {
-    CHECK(mState == EXECUTING || mState == RECONFIGURING);
-
-    if (mFlags & kUseSecureInputBuffers) {
-        Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
-        for (size_t i = 0; i < buffers->size(); ++i) {
-            if (!drainAnyInputBuffer()
-                    || (mFlags & kOnlySubmitOneInputBufferAtOneTime)) {
-                break;
-            }
-        }
-    } else {
-        Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
-        for (size_t i = 0; i < buffers->size(); ++i) {
-            BufferInfo *info = &buffers->editItemAt(i);
-
-            if (info->mStatus != OWNED_BY_US) {
-                continue;
-            }
-
-            if (!drainInputBuffer(info)) {
-                break;
-            }
-
-            if (mFlags & kOnlySubmitOneInputBufferAtOneTime) {
-                break;
-            }
-        }
-    }
-}
-
-bool OMXCodec::drainAnyInputBuffer() {
-    return drainInputBuffer((BufferInfo *)NULL);
-}
-
-OMXCodec::BufferInfo *OMXCodec::findInputBufferByDataPointer(void *ptr) {
-    Vector<BufferInfo> *infos = &mPortBuffers[kPortIndexInput];
-    for (size_t i = 0; i < infos->size(); ++i) {
-        BufferInfo *info = &infos->editItemAt(i);
-
-        if (info->mData == ptr) {
-            CODEC_LOGV(
-                    "input buffer data ptr = %p, buffer_id = %u",
-                    ptr,
-                    info->mBuffer);
-
-            return info;
-        }
-    }
-
-    TRESPASS();
-}
-
-OMXCodec::BufferInfo *OMXCodec::findEmptyInputBuffer() {
-    Vector<BufferInfo> *infos = &mPortBuffers[kPortIndexInput];
-    for (size_t i = 0; i < infos->size(); ++i) {
-        BufferInfo *info = &infos->editItemAt(i);
-
-        if (info->mStatus == OWNED_BY_US) {
-            return info;
-        }
-    }
-
-    TRESPASS();
-}
-
-bool OMXCodec::drainInputBuffer(BufferInfo *info) {
-    if (info != NULL) {
-        CHECK_EQ((int)info->mStatus, (int)OWNED_BY_US);
-    }
-
-    if (mSignalledEOS) {
-        return false;
-    }
-
-    if (mCodecSpecificDataIndex < mCodecSpecificData.size()) {
-        CHECK(!(mFlags & kUseSecureInputBuffers));
-
-        const CodecSpecificData *specific =
-            mCodecSpecificData[mCodecSpecificDataIndex];
-
-        size_t size = specific->mSize;
-
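-        // Components that don't request NAL fragments consume Annex-B byte
-        // streams, so prepend the four-byte start code to the AVC/HEVC
-        // codec specific data.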
-        if ((!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mMIME) ||
-             !strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mMIME))
-                && !(mQuirks & kWantsNALFragments)) {
-            static const uint8_t kNALStartCode[4] =
-                    { 0x00, 0x00, 0x00, 0x01 };
-
-            CHECK(info->mSize >= specific->mSize + 4);
-
-            size += 4;
-
-            memcpy(info->mData, kNALStartCode, 4);
-            memcpy((uint8_t *)info->mData + 4,
-                   specific->mData, specific->mSize);
-        } else {
-            CHECK(info->mSize >= specific->mSize);
-            memcpy(info->mData, specific->mData, specific->mSize);
-        }
-
-        mNoMoreOutputData = false;
-
-        CODEC_LOGV("calling emptyBuffer with codec specific data");
-
-        status_t err = mOMX->emptyBuffer(
-                mNode, info->mBuffer, 0, size,
-                OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_CODECCONFIG,
-                0);
-        CHECK_EQ(err, (status_t)OK);
-
-        info->mStatus = OWNED_BY_COMPONENT;
-
-        ++mCodecSpecificDataIndex;
-        return true;
-    }
-
-    if (mPaused) {
-        return false;
-    }
-
-    status_t err;
-
-    bool signalEOS = false;
-    int64_t timestampUs = 0;
-
-    size_t offset = 0;
-    int32_t n = 0;
-
-
-    for (;;) {
-        MediaBuffer *srcBuffer;
-        if (mSeekTimeUs >= 0) {
-            if (mLeftOverBuffer) {
-                mLeftOverBuffer->release();
-                mLeftOverBuffer = NULL;
-            }
-
-            MediaSource::ReadOptions options;
-            options.setSeekTo(mSeekTimeUs, mSeekMode);
-
-            mSeekTimeUs = -1;
-            mSeekMode = ReadOptions::SEEK_CLOSEST_SYNC;
-            mBufferFilled.signal();
-
-            err = mSource->read(&srcBuffer, &options);
-
-            if (err == OK) {
-                int64_t targetTimeUs;
-                if (srcBuffer->meta_data()->findInt64(
-                            kKeyTargetTime, &targetTimeUs)
-                        && targetTimeUs >= 0) {
-                    CODEC_LOGV("targetTimeUs = %lld us", (long long)targetTimeUs);
-                    mTargetTimeUs = targetTimeUs;
-                } else {
-                    mTargetTimeUs = -1;
-                }
-            }
-        } else if (mLeftOverBuffer) {
-            srcBuffer = mLeftOverBuffer;
-            mLeftOverBuffer = NULL;
-
-            err = OK;
-        } else {
-            err = mSource->read(&srcBuffer);
-        }
-
-        if (err != OK) {
-            signalEOS = true;
-            mFinalStatus = err;
-            mSignalledEOS = true;
-            mBufferFilled.signal();
-            break;
-        }
-
-        if (mFlags & kUseSecureInputBuffers) {
-            info = findInputBufferByDataPointer(srcBuffer->data());
-            CHECK(info != NULL);
-        }
-
-        size_t remainingBytes = info->mSize - offset;
-
-        if (srcBuffer->range_length() > remainingBytes) {
-            if (offset == 0) {
-                CODEC_LOGE(
-                     "Codec's input buffers are too small to accomodate "
-                     "buffer read from source (info->mSize = %zu, srcLength = %zu)",
-                     info->mSize, srcBuffer->range_length());
-
-                srcBuffer->release();
-                srcBuffer = NULL;
-
-                setState(ERROR);
-                return false;
-            }
-
-            mLeftOverBuffer = srcBuffer;
-            break;
-        }
-
-        bool releaseBuffer = true;
-        if (mFlags & kStoreMetaDataInVideoBuffers) {
-            releaseBuffer = false;
-            info->mMediaBuffer = srcBuffer;
-        }
-
-        if (mFlags & kUseSecureInputBuffers) {
-            // Data in "info" is already provided at this time.
-
-            releaseBuffer = false;
-
-            CHECK(info->mMediaBuffer == NULL);
-            info->mMediaBuffer = srcBuffer;
-        } else {
-            CHECK(srcBuffer->data() != NULL);
-            memcpy((uint8_t *)info->mData + offset,
-                    (const uint8_t *)srcBuffer->data()
-                        + srcBuffer->range_offset(),
-                    srcBuffer->range_length());
-        }
-
-        int64_t lastBufferTimeUs;
-        CHECK(srcBuffer->meta_data()->findInt64(kKeyTime, &lastBufferTimeUs));
-        CHECK(lastBufferTimeUs >= 0);
-        if (mIsEncoder && mIsVideo) {
-            mDecodingTimeList.push_back(lastBufferTimeUs);
-        }
-
-        if (offset == 0) {
-            timestampUs = lastBufferTimeUs;
-        }
-
-        offset += srcBuffer->range_length();
-
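-        // Vorbis decoders expect the number of valid samples in the page
-        // (kKeyValidSamples, -1 if unknown) to be appended to the payload.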
-        if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_VORBIS, mMIME)) {
-            CHECK(!(mQuirks & kSupportsMultipleFramesPerInputBuffer));
-            CHECK_GE(info->mSize, offset + sizeof(int32_t));
-
-            int32_t numPageSamples;
-            if (!srcBuffer->meta_data()->findInt32(
-                        kKeyValidSamples, &numPageSamples)) {
-                numPageSamples = -1;
-            }
-
-            memcpy((uint8_t *)info->mData + offset,
-                   &numPageSamples,
-                   sizeof(numPageSamples));
-
-            offset += sizeof(numPageSamples);
-        }
-
-        if (releaseBuffer) {
-            srcBuffer->release();
-            srcBuffer = NULL;
-        }
-
-        ++n;
-
-        if (!(mQuirks & kSupportsMultipleFramesPerInputBuffer)) {
-            break;
-        }
-
-        int64_t coalescedDurationUs = lastBufferTimeUs - timestampUs;
-
-        if (coalescedDurationUs > 250000ll) {
-            // Don't coalesce more than 250ms worth of encoded data at once.
-            break;
-        }
-    }
-
-    if (n > 1) {
-        ALOGV("coalesced %d frames into one input buffer", n);
-    }
-
-    OMX_U32 flags = OMX_BUFFERFLAG_ENDOFFRAME;
-
-    if (signalEOS) {
-        flags |= OMX_BUFFERFLAG_EOS;
-    } else {
-        mNoMoreOutputData = false;
-    }
-
-    if (info == NULL) {
-        CHECK(mFlags & kUseSecureInputBuffers);
-        CHECK(signalEOS);
-
-        // This is fishy, there's still a MediaBuffer corresponding to this
-        // info available to the source at this point even though we're going
-        // to use it to signal EOS to the codec.
-        info = findEmptyInputBuffer();
-    }
-
-    CODEC_LOGV("Calling emptyBuffer on buffer %u (length %zu), "
-               "timestamp %lld us (%.2f secs)",
-               info->mBuffer, offset,
-               (long long)timestampUs, timestampUs / 1E6);
-
-    err = mOMX->emptyBuffer(
-            mNode, info->mBuffer, 0, offset,
-            flags, timestampUs);
-
-    if (err != OK) {
-        setState(ERROR);
-        return false;
-    }
-
-    info->mStatus = OWNED_BY_COMPONENT;
-
-    return true;
-}
-
-void OMXCodec::fillOutputBuffer(BufferInfo *info) {
-    CHECK_EQ((int)info->mStatus, (int)OWNED_BY_US);
-
-    if (mNoMoreOutputData) {
-        CODEC_LOGV("There is no more output data available, not "
-             "calling fillOutputBuffer");
-        return;
-    }
-
-    CODEC_LOGV("Calling fillBuffer on buffer %u", info->mBuffer);
-    status_t err = mOMX->fillBuffer(mNode, info->mBuffer);
-
-    if (err != OK) {
-        CODEC_LOGE("fillBuffer failed w/ error 0x%08x", err);
-
-        setState(ERROR);
-        return;
-    }
-
-    info->mStatus = OWNED_BY_COMPONENT;
-}
-
-bool OMXCodec::drainInputBuffer(IOMX::buffer_id buffer) {
-    Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
-    for (size_t i = 0; i < buffers->size(); ++i) {
-        if ((*buffers)[i].mBuffer == buffer) {
-            return drainInputBuffer(&buffers->editItemAt(i));
-        }
-    }
-
-    CHECK(!"should not be here.");
-
-    return false;
-}
-
-void OMXCodec::fillOutputBuffer(IOMX::buffer_id buffer) {
-    Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
-    for (size_t i = 0; i < buffers->size(); ++i) {
-        if ((*buffers)[i].mBuffer == buffer) {
-            fillOutputBuffer(&buffers->editItemAt(i));
-            return;
-        }
-    }
-
-    CHECK(!"should not be here.");
-}
-
-void OMXCodec::setState(State newState) {
-    mState = newState;
-    mAsyncCompletion.signal();
-
-    // This may cause some spurious wakeups but is necessary to
-    // unblock the reader if we enter ERROR state.
-    mBufferFilled.signal();
-}
-
-status_t OMXCodec::waitForBufferFilled_l() {
-
-    if (mIsEncoder) {
-        // For timelapse video recording, the source may not send an input
-        // frame for a _long_ time. Do not use a timeout for video encoding.
-        return mBufferFilled.wait(mLock);
-    }
-    status_t err = mBufferFilled.waitRelative(mLock, kBufferFilledEventTimeOutNs);
-    if (err != OK) {
-        CODEC_LOGE("Timed out waiting for output buffers: %zu/%zu",
-            countBuffersWeOwn(mPortBuffers[kPortIndexInput]),
-            countBuffersWeOwn(mPortBuffers[kPortIndexOutput]));
-    }
-    return err;
-}
-
-void OMXCodec::setRawAudioFormat(
-        OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels) {
-
-    // port definition
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = portIndex;
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-    def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
-    CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamPortDefinition,
-            &def, sizeof(def)), (status_t)OK);
-
-    // pcm param
-    OMX_AUDIO_PARAM_PCMMODETYPE pcmParams;
-    InitOMXParams(&pcmParams);
-    pcmParams.nPortIndex = portIndex;
-
-    err = mOMX->getParameter(
-            mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
-
-    CHECK_EQ(err, (status_t)OK);
-
-    pcmParams.nChannels = numChannels;
-    pcmParams.eNumData = OMX_NumericalDataSigned;
-    pcmParams.bInterleaved = OMX_TRUE;
-    pcmParams.nBitPerSample = 16;
-    pcmParams.nSamplingRate = sampleRate;
-    pcmParams.ePCMMode = OMX_AUDIO_PCMModeLinear;
-
-    CHECK_EQ(getOMXChannelMapping(
-                numChannels, pcmParams.eChannelMapping), (status_t)OK);
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
-
-    CHECK_EQ(err, (status_t)OK);
-}
-
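-// Map a bitrate to the lowest AMR band mode that can carry it; anything
-// above the highest threshold falls through to WB8 / NB7 respectively.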
-static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate(bool isAMRWB, int32_t bps) {
-    if (isAMRWB) {
-        if (bps <= 6600) {
-            return OMX_AUDIO_AMRBandModeWB0;
-        } else if (bps <= 8850) {
-            return OMX_AUDIO_AMRBandModeWB1;
-        } else if (bps <= 12650) {
-            return OMX_AUDIO_AMRBandModeWB2;
-        } else if (bps <= 14250) {
-            return OMX_AUDIO_AMRBandModeWB3;
-        } else if (bps <= 15850) {
-            return OMX_AUDIO_AMRBandModeWB4;
-        } else if (bps <= 18250) {
-            return OMX_AUDIO_AMRBandModeWB5;
-        } else if (bps <= 19850) {
-            return OMX_AUDIO_AMRBandModeWB6;
-        } else if (bps <= 23050) {
-            return OMX_AUDIO_AMRBandModeWB7;
-        }
-
-        // 23850 bps
-        return OMX_AUDIO_AMRBandModeWB8;
-    } else {  // AMRNB
-        if (bps <= 4750) {
-            return OMX_AUDIO_AMRBandModeNB0;
-        } else if (bps <= 5150) {
-            return OMX_AUDIO_AMRBandModeNB1;
-        } else if (bps <= 5900) {
-            return OMX_AUDIO_AMRBandModeNB2;
-        } else if (bps <= 6700) {
-            return OMX_AUDIO_AMRBandModeNB3;
-        } else if (bps <= 7400) {
-            return OMX_AUDIO_AMRBandModeNB4;
-        } else if (bps <= 7950) {
-            return OMX_AUDIO_AMRBandModeNB5;
-        } else if (bps <= 10200) {
-            return OMX_AUDIO_AMRBandModeNB6;
-        }
-
-        // 12200 bps
-        return OMX_AUDIO_AMRBandModeNB7;
-    }
-}
-
-void OMXCodec::setAMRFormat(bool isWAMR, int32_t bitRate) {
-    OMX_U32 portIndex = mIsEncoder ? kPortIndexOutput : kPortIndexInput;
-
-    OMX_AUDIO_PARAM_AMRTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = portIndex;
-
-    status_t err =
-        mOMX->getParameter(mNode, OMX_IndexParamAudioAmr, &def, sizeof(def));
-
-    CHECK_EQ(err, (status_t)OK);
-
-    def.eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
-
-    def.eAMRBandMode = pickModeFromBitRate(isWAMR, bitRate);
-    err = mOMX->setParameter(mNode, OMX_IndexParamAudioAmr, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    ////////////////////////
-
-    if (mIsEncoder) {
-        sp<MetaData> format = mSource->getFormat();
-        int32_t sampleRate;
-        int32_t numChannels;
-        CHECK(format->findInt32(kKeySampleRate, &sampleRate));
-        CHECK(format->findInt32(kKeyChannelCount, &numChannels));
-
-        setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
-    }
-}
-
-status_t OMXCodec::setAACFormat(
-        int32_t numChannels, int32_t sampleRate, int32_t bitRate, int32_t aacProfile, bool isADTS) {
-    if (numChannels > 2) {
-        ALOGW("Number of channels: (%d) \n", numChannels);
-    }
-
-    if (mIsEncoder) {
-        if (isADTS) {
-            return -EINVAL;
-        }
-
-        //////////////// input port ////////////////////
-        setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
-
-        //////////////// output port ////////////////////
-        // format
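-        // Enumerate the output port's supported formats (advancing nIndex)
-        // until an AAC coding entry is found.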
-        OMX_AUDIO_PARAM_PORTFORMATTYPE format;
-        InitOMXParams(&format);
-        format.nPortIndex = kPortIndexOutput;
-        format.nIndex = 0;
-        status_t err = OMX_ErrorNone;
-        while (OMX_ErrorNone == err) {
-            CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioPortFormat,
-                    &format, sizeof(format)), (status_t)OK);
-            if (format.eEncoding == OMX_AUDIO_CodingAAC) {
-                break;
-            }
-            format.nIndex++;
-        }
-        CHECK_EQ((status_t)OK, err);
-        CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamAudioPortFormat,
-                &format, sizeof(format)), (status_t)OK);
-
-        // port definition
-        OMX_PARAM_PORTDEFINITIONTYPE def;
-        InitOMXParams(&def);
-        def.nPortIndex = kPortIndexOutput;
-        CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamPortDefinition,
-                &def, sizeof(def)), (status_t)OK);
-        def.format.audio.bFlagErrorConcealment = OMX_TRUE;
-        def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;
-        CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamPortDefinition,
-                &def, sizeof(def)), (status_t)OK);
-
-        // profile
-        OMX_AUDIO_PARAM_AACPROFILETYPE profile;
-        InitOMXParams(&profile);
-        profile.nPortIndex = kPortIndexOutput;
-        CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioAac,
-                &profile, sizeof(profile)), (status_t)OK);
-        profile.nChannels = numChannels;
-        profile.eChannelMode = (numChannels == 1 ?
-                OMX_AUDIO_ChannelModeMono : OMX_AUDIO_ChannelModeStereo);
-        profile.nSampleRate = sampleRate;
-        profile.nBitRate = bitRate;
-        profile.nAudioBandWidth = 0;
-        profile.nFrameLength = 0;
-        profile.nAACtools = OMX_AUDIO_AACToolAll;
-        profile.nAACERtools = OMX_AUDIO_AACERNone;
-        profile.eAACProfile = (OMX_AUDIO_AACPROFILETYPE) aacProfile;
-        profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
-        err = mOMX->setParameter(mNode, OMX_IndexParamAudioAac,
-                &profile, sizeof(profile));
-
-        if (err != OK) {
-            CODEC_LOGE("setParameter('OMX_IndexParamAudioAac') failed "
-                       "(err = %d)",
-                       err);
-            return err;
-        }
-    } else {
-        OMX_AUDIO_PARAM_AACPROFILETYPE profile;
-        InitOMXParams(&profile);
-        profile.nPortIndex = kPortIndexInput;
-
-        status_t err = mOMX->getParameter(
-                mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
-        CHECK_EQ(err, (status_t)OK);
-
-        profile.nChannels = numChannels;
-        profile.nSampleRate = sampleRate;
-
-        profile.eAACStreamFormat =
-            isADTS
-                ? OMX_AUDIO_AACStreamFormatMP4ADTS
-                : OMX_AUDIO_AACStreamFormatMP4FF;
-
-        err = mOMX->setParameter(
-                mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
-
-        if (err != OK) {
-            CODEC_LOGE("setParameter('OMX_IndexParamAudioAac') failed "
-                       "(err = %d)",
-                       err);
-            return err;
-        }
-    }
-
-    return OK;
-}
-
-status_t OMXCodec::setAC3Format(int32_t numChannels, int32_t sampleRate) {
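-    // OMX_IndexParamAudioAndroidAc3 is an Android extension index, hence
-    // the explicit casts to OMX_INDEXTYPE below.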
-    OMX_AUDIO_PARAM_ANDROID_AC3TYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = kPortIndexInput;
-
-    status_t err = mOMX->getParameter(
-            mNode,
-            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
-            &def,
-            sizeof(def));
-
-    if (err != OK) {
-        return err;
-    }
-
-    def.nChannels = numChannels;
-    def.nSampleRate = sampleRate;
-
-    return mOMX->setParameter(
-            mNode,
-            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
-            &def,
-            sizeof(def));
-}
-
-void OMXCodec::setG711Format(int32_t sampleRate, int32_t numChannels) {
-    CHECK(!mIsEncoder);
-    setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
-}
-
-void OMXCodec::setImageOutputFormat(
-        OMX_COLOR_FORMATTYPE format, OMX_U32 width, OMX_U32 height) {
-    CODEC_LOGV("setImageOutputFormat(%u, %u)", width, height);
-
-#if 0
-    OMX_INDEXTYPE index;
-    status_t err = mOMX->get_extension_index(
-            mNode, "OMX.TI.JPEG.decode.Config.OutputColorFormat", &index);
-    CHECK_EQ(err, (status_t)OK);
-
-    err = mOMX->set_config(mNode, index, &format, sizeof(format));
-    CHECK_EQ(err, (status_t)OK);
-#endif
-
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = kPortIndexOutput;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainImage);
-
-    OMX_IMAGE_PORTDEFINITIONTYPE *imageDef = &def.format.image;
-
-    CHECK_EQ((int)imageDef->eCompressionFormat, (int)OMX_IMAGE_CodingUnused);
-    imageDef->eColorFormat = format;
-    imageDef->nFrameWidth = width;
-    imageDef->nFrameHeight = height;
-
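-    // Size the buffer from the bytes-per-pixel of the requested format
-    // (the YUV planar variants below are 12 bits per pixel).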
-    switch (format) {
-        case OMX_COLOR_FormatYUV420PackedPlanar:
-        case OMX_COLOR_FormatYUV411Planar:
-        {
-            def.nBufferSize = (width * height * 3) / 2;
-            break;
-        }
-
-        case OMX_COLOR_FormatCbYCrY:
-        {
-            def.nBufferSize = width * height * 2;
-            break;
-        }
-
-        case OMX_COLOR_Format32bitARGB8888:
-        {
-            def.nBufferSize = width * height * 4;
-            break;
-        }
-
-        case OMX_COLOR_Format16bitARGB4444:
-        case OMX_COLOR_Format16bitARGB1555:
-        case OMX_COLOR_Format16bitRGB565:
-        case OMX_COLOR_Format16bitBGR565:
-        {
-            def.nBufferSize = width * height * 2;
-            break;
-        }
-
-        default:
-            CHECK(!"Should not be here. Unknown color format.");
-            break;
-    }
-
-    def.nBufferCountActual = def.nBufferCountMin;
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-}
-
-void OMXCodec::setJPEGInputFormat(
-        OMX_U32 width, OMX_U32 height, OMX_U32 compressedSize) {
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = kPortIndexInput;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainImage);
-    OMX_IMAGE_PORTDEFINITIONTYPE *imageDef = &def.format.image;
-
-    CHECK_EQ((int)imageDef->eCompressionFormat, (int)OMX_IMAGE_CodingJPEG);
-    imageDef->nFrameWidth = width;
-    imageDef->nFrameHeight = height;
-
-    def.nBufferSize = compressedSize;
-    def.nBufferCountActual = def.nBufferCountMin;
-
-    err = mOMX->setParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-}
-
-void OMXCodec::addCodecSpecificData(const void *data, size_t size) {
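-    // CodecSpecificData ends in a one-byte data array; over-allocating by
-    // (size - 1) bytes leaves room for the full payload in mData.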
-    CodecSpecificData *specific =
-        (CodecSpecificData *)malloc(sizeof(CodecSpecificData) + size - 1);
-
-    specific->mSize = size;
-    memcpy(specific->mData, data, size);
-
-    mCodecSpecificData.push(specific);
-}
-
-void OMXCodec::clearCodecSpecificData() {
-    for (size_t i = 0; i < mCodecSpecificData.size(); ++i) {
-        free(mCodecSpecificData.editItemAt(i));
-    }
-    mCodecSpecificData.clear();
-    mCodecSpecificDataIndex = 0;
-}
-
-status_t OMXCodec::start(MetaData *meta) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mState != LOADED) {
-        CODEC_LOGE("called start in the unexpected state: %d", mState);
-        return UNKNOWN_ERROR;
-    }
-
-    sp<MetaData> params = new MetaData;
-    if (mQuirks & kWantsNALFragments) {
-        params->setInt32(kKeyWantsNALFragments, true);
-    }
-    if (meta) {
-        int64_t startTimeUs = 0;
-        int64_t timeUs;
-        if (meta->findInt64(kKeyTime, &timeUs)) {
-            startTimeUs = timeUs;
-        }
-        params->setInt64(kKeyTime, startTimeUs);
-    }
-
-    mCodecSpecificDataIndex = 0;
-    mInitialBufferSubmit = true;
-    mSignalledEOS = false;
-    mNoMoreOutputData = false;
-    mOutputPortSettingsHaveChanged = false;
-    mSeekTimeUs = -1;
-    mSeekMode = ReadOptions::SEEK_CLOSEST_SYNC;
-    mTargetTimeUs = -1;
-    mFilledBuffers.clear();
-    mPaused = false;
-
-    status_t err;
-    if (mIsEncoder) {
-        // Call init() before starting the source so that, if supported, we
-        // can configure the source to use exactly the same number of input
-        // buffers as requested by the encoder.
-        if ((err = init()) != OK) {
-            CODEC_LOGE("init failed: %d", err);
-            return err;
-        }
-
-        params->setInt32(kKeyNumBuffers, mPortBuffers[kPortIndexInput].size());
-        err = mSource->start(params.get());
-        if (err != OK) {
-            CODEC_LOGE("source failed to start: %d", err);
-            stopOmxComponent_l();
-        }
-        return err;
-    }
-
-    // Decoder case
-    if ((err = mSource->start(params.get())) != OK) {
-        CODEC_LOGE("source failed to start: %d", err);
-        return err;
-    }
-    return init();
-}
-
-status_t OMXCodec::stop() {
-    CODEC_LOGV("stop mState=%d", mState);
-    Mutex::Autolock autoLock(mLock);
-    status_t err = stopOmxComponent_l();
-    mSource->stop();
-
-    CODEC_LOGV("stopped in state %d", mState);
-    return err;
-}
-
-status_t OMXCodec::stopOmxComponent_l() {
-    CODEC_LOGV("stopOmxComponent_l mState=%d", mState);
-
-    while (isIntermediateState(mState)) {
-        mAsyncCompletion.wait(mLock);
-    }
-
-    bool isError = false;
-    switch (mState) {
-        case LOADED:
-            break;
-
-        case ERROR:
-        {
-            if (mPortStatus[kPortIndexOutput] == ENABLING) {
-                // Codec is in a wedged state (technical term):
-                // we saw an output port settings change from the codec,
-                // disabled the output port, freed the output buffers, and
-                // initiated re-enabling the output port, but then failed to
-                // reallocate the output buffers. There is no clean way to
-                // transition executing->idle and idle->loaded while the
-                // output port is still disabled, so simply free as many
-                // resources as we can and pretend we're in LOADED state so
-                // that the destructor frees the component instance without
-                // asserting.
-                freeBuffersOnPort(kPortIndexInput, true /* onlyThoseWeOwn */);
-                freeBuffersOnPort(kPortIndexOutput, true /* onlyThoseWeOwn */);
-                setState(LOADED);
-                break;
-            } else {
-                OMX_STATETYPE state = OMX_StateInvalid;
-                status_t err = mOMX->getState(mNode, &state);
-                CHECK_EQ(err, (status_t)OK);
-
-                if (state != OMX_StateExecuting) {
-                    break;
-                }
-                // else fall through to the idling code
-            }
-
-            isError = true;
-        }
-
-        case EXECUTING:
-        {
-            setState(EXECUTING_TO_IDLE);
-
-            if (mQuirks & kRequiresFlushBeforeShutdown) {
-                CODEC_LOGV("This component requires a flush before transitioning "
-                     "from EXECUTING to IDLE...");
-
-                bool emulateInputFlushCompletion =
-                    !flushPortAsync(kPortIndexInput);
-
-                bool emulateOutputFlushCompletion =
-                    !flushPortAsync(kPortIndexOutput);
-
-                if (emulateInputFlushCompletion) {
-                    onCmdComplete(OMX_CommandFlush, kPortIndexInput);
-                }
-
-                if (emulateOutputFlushCompletion) {
-                    onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
-                }
-            } else {
-                mPortStatus[kPortIndexInput] = SHUTTING_DOWN;
-                mPortStatus[kPortIndexOutput] = SHUTTING_DOWN;
-
-                status_t err =
-                    mOMX->sendCommand(mNode, OMX_CommandStateSet, OMX_StateIdle);
-                CHECK_EQ(err, (status_t)OK);
-            }
-
-            while (mState != LOADED && mState != ERROR) {
-                mAsyncCompletion.wait(mLock);
-            }
-
-            if (isError) {
-                // We were in the ERROR state coming in, so restore that now
-                // that we've idled the OMX component.
-                setState(ERROR);
-            }
-
-            break;
-        }
-
-        default:
-        {
-            CHECK(!"should not be here.");
-            break;
-        }
-    }
-
-    if (mLeftOverBuffer) {
-        mLeftOverBuffer->release();
-        mLeftOverBuffer = NULL;
-    }
-
-    return OK;
-}
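
A note on the flush handling removed above: flushPortAsync() appears to return
false when a port has nothing to flush, in which case no flush-complete
callback will ever arrive, so the code invokes onCmdComplete() itself. That
idiom, isolated as a minimal sketch (Port and the callables are stand-ins for
the real types, not part of the patch):

    enum Port { kInput, kOutput };

    template <typename FlushFn, typename DoneFn>
    void flushOrEmulate(Port port, FlushFn flushAsync, DoneFn onFlushComplete) {
        // When flushAsync() returns false, no completion callback will
        // arrive, so completion is emulated inline to keep the state
        // machine advancing.
        if (!flushAsync(port)) {
            onFlushComplete(port);
        }
    }
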
-
-sp<MetaData> OMXCodec::getFormat() {
-    Mutex::Autolock autoLock(mLock);
-
-    return mOutputFormat;
-}
-
-status_t OMXCodec::read(
-        MediaBuffer **buffer, const ReadOptions *options) {
-    status_t err = OK;
-    *buffer = NULL;
-
-    Mutex::Autolock autoLock(mLock);
-
-    if (mState != EXECUTING && mState != RECONFIGURING) {
-        return UNKNOWN_ERROR;
-    }
-
-    bool seeking = false;
-    int64_t seekTimeUs;
-    ReadOptions::SeekMode seekMode;
-    if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
-        seeking = true;
-    }
-
-    if (mInitialBufferSubmit) {
-        mInitialBufferSubmit = false;
-
-        if (seeking) {
-            CHECK(seekTimeUs >= 0);
-            mSeekTimeUs = seekTimeUs;
-            mSeekMode = seekMode;
-
-            // There's no reason to trigger the flush logic below;
-            // there's nothing to flush yet.
-            seeking = false;
-            mPaused = false;
-        }
-
-        drainInputBuffers();
-
-        if (mState == EXECUTING) {
-            // Otherwise mState == RECONFIGURING and this code will trigger
-            // after the output port is reenabled.
-            fillOutputBuffers();
-        }
-    }
-
-    if (seeking) {
-        while (mState == RECONFIGURING) {
-            if ((err = waitForBufferFilled_l()) != OK) {
-                return err;
-            }
-        }
-
-        if (mState != EXECUTING) {
-            return UNKNOWN_ERROR;
-        }
-
-        CODEC_LOGV("seeking to %" PRId64 " us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6);
-
-        mSignalledEOS = false;
-
-        CHECK(seekTimeUs >= 0);
-        mSeekTimeUs = seekTimeUs;
-        mSeekMode = seekMode;
-
-        mFilledBuffers.clear();
-
-        CHECK_EQ((int)mState, (int)EXECUTING);
-
-        bool emulateInputFlushCompletion = !flushPortAsync(kPortIndexInput);
-        bool emulateOutputFlushCompletion = !flushPortAsync(kPortIndexOutput);
-
-        if (emulateInputFlushCompletion) {
-            onCmdComplete(OMX_CommandFlush, kPortIndexInput);
-        }
-
-        if (emulateOutputFlushCompletion) {
-            onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
-        }
-
-        while (mSeekTimeUs >= 0) {
-            if ((err = waitForBufferFilled_l()) != OK) {
-                return err;
-            }
-        }
-    }
-
-    while (mState != ERROR && !mNoMoreOutputData && mFilledBuffers.empty()) {
-        if ((err = waitForBufferFilled_l()) != OK) {
-            return err;
-        }
-    }
-
-    if (mState == ERROR) {
-        return UNKNOWN_ERROR;
-    }
-
-    if (mFilledBuffers.empty()) {
-        return mSignalledEOS ? mFinalStatus : ERROR_END_OF_STREAM;
-    }
-
-    if (mOutputPortSettingsHaveChanged) {
-        mOutputPortSettingsHaveChanged = false;
-
-        return INFO_FORMAT_CHANGED;
-    }
-
-    size_t index = *mFilledBuffers.begin();
-    mFilledBuffers.erase(mFilledBuffers.begin());
-
-    BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
-    CHECK_EQ((int)info->mStatus, (int)OWNED_BY_US);
-    info->mStatus = OWNED_BY_CLIENT;
-
-    info->mMediaBuffer->add_ref();
-    if (mSkipCutBuffer != NULL) {
-        mSkipCutBuffer->submit(info->mMediaBuffer);
-    }
-    *buffer = info->mMediaBuffer;
-
-    return OK;
-}
-
-void OMXCodec::signalBufferReturned(MediaBuffer *buffer) {
-    Mutex::Autolock autoLock(mLock);
-
-    Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
-    for (size_t i = 0; i < buffers->size(); ++i) {
-        BufferInfo *info = &buffers->editItemAt(i);
-
-        if (info->mMediaBuffer == buffer) {
-            CHECK_EQ((int)mPortStatus[kPortIndexOutput], (int)ENABLED);
-            CHECK_EQ((int)info->mStatus, (int)OWNED_BY_CLIENT);
-
-            info->mStatus = OWNED_BY_US;
-
-            if (buffer->graphicBuffer() == 0) {
-                fillOutputBuffer(info);
-            } else {
-                sp<MetaData> metaData = info->mMediaBuffer->meta_data();
-                int32_t rendered = 0;
-                if (!metaData->findInt32(kKeyRendered, &rendered)) {
-                    rendered = 0;
-                }
-                if (!rendered) {
-                    status_t err = cancelBufferToNativeWindow(info);
-                    if (err < 0) {
-                        return;
-                    }
-                }
-
-                info->mStatus = OWNED_BY_NATIVE_WINDOW;
-
-                // Dequeue the next buffer from the native window.
-                BufferInfo *nextBufInfo = dequeueBufferFromNativeWindow();
-                if (nextBufInfo == 0) {
-                    return;
-                }
-
-                // Give the buffer to the OMX node to fill.
-                fillOutputBuffer(nextBufInfo);
-            }
-            return;
-        }
-    }
-
-    CHECK(!"should not be here.");
-}
-
-void OMXCodec::dumpPortStatus(OMX_U32 portIndex) {
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = portIndex;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    printf("%s Port = {\n", portIndex == kPortIndexInput ? "Input" : "Output");
-
-    CHECK((portIndex == kPortIndexInput && def.eDir == OMX_DirInput)
-          || (portIndex == kPortIndexOutput && def.eDir == OMX_DirOutput));
-
-    printf("  nBufferCountActual = %" PRIu32 "\n", def.nBufferCountActual);
-    printf("  nBufferCountMin = %" PRIu32 "\n", def.nBufferCountMin);
-    printf("  nBufferSize = %" PRIu32 "\n", def.nBufferSize);
-
-    switch (def.eDomain) {
-        case OMX_PortDomainImage:
-        {
-            const OMX_IMAGE_PORTDEFINITIONTYPE *imageDef = &def.format.image;
-
-            printf("\n");
-            printf("  // Image\n");
-            printf("  nFrameWidth = %" PRIu32 "\n", imageDef->nFrameWidth);
-            printf("  nFrameHeight = %" PRIu32 "\n", imageDef->nFrameHeight);
-            printf("  nStride = %" PRIu32 "\n", imageDef->nStride);
-
-            printf("  eCompressionFormat = %s\n",
-                   asString(imageDef->eCompressionFormat));
-
-            printf("  eColorFormat = %s\n",
-                   asString(imageDef->eColorFormat));
-
-            break;
-        }
-
-        case OMX_PortDomainVideo:
-        {
-            OMX_VIDEO_PORTDEFINITIONTYPE *videoDef = &def.format.video;
-
-            printf("\n");
-            printf("  // Video\n");
-            printf("  nFrameWidth = %" PRIu32 "\n", videoDef->nFrameWidth);
-            printf("  nFrameHeight = %" PRIu32 "\n", videoDef->nFrameHeight);
-            printf("  nStride = %" PRIu32 "\n", videoDef->nStride);
-
-            printf("  eCompressionFormat = %s\n",
-                   asString(videoDef->eCompressionFormat));
-
-            printf("  eColorFormat = %s\n",
-                   asString(videoDef->eColorFormat));
-
-            break;
-        }
-
-        case OMX_PortDomainAudio:
-        {
-            OMX_AUDIO_PORTDEFINITIONTYPE *audioDef = &def.format.audio;
-
-            printf("\n");
-            printf("  // Audio\n");
-            printf("  eEncoding = %s\n",
-                   asString(audioDef->eEncoding));
-
-            if (audioDef->eEncoding == OMX_AUDIO_CodingPCM) {
-                OMX_AUDIO_PARAM_PCMMODETYPE params;
-                InitOMXParams(&params);
-                params.nPortIndex = portIndex;
-
-                err = mOMX->getParameter(
-                        mNode, OMX_IndexParamAudioPcm, &params, sizeof(params));
-                CHECK_EQ(err, (status_t)OK);
-
-                printf("  nSamplingRate = %" PRIu32 "\n", params.nSamplingRate);
-                printf("  nChannels = %" PRIu32 "\n", params.nChannels);
-                printf("  bInterleaved = %d\n", params.bInterleaved);
-                printf("  nBitPerSample = %" PRIu32 "\n", params.nBitPerSample);
-
-                printf("  eNumData = %s\n",
-                       params.eNumData == OMX_NumericalDataSigned
-                        ? "signed" : "unsigned");
-
-                printf("  ePCMMode = %s\n", asString(params.ePCMMode));
-            } else if (audioDef->eEncoding == OMX_AUDIO_CodingAMR) {
-                OMX_AUDIO_PARAM_AMRTYPE amr;
-                InitOMXParams(&amr);
-                amr.nPortIndex = portIndex;
-
-                err = mOMX->getParameter(
-                        mNode, OMX_IndexParamAudioAmr, &amr, sizeof(amr));
-                CHECK_EQ(err, (status_t)OK);
-
-                printf("  nChannels = %" PRIu32 "\n", amr.nChannels);
-                printf("  eAMRBandMode = %s\n",
-                        asString(amr.eAMRBandMode));
-                printf("  eAMRFrameFormat = %s\n",
-                        asString(amr.eAMRFrameFormat));
-            }
-
-            break;
-        }
-
-        default:
-        {
-            printf("  // Unknown\n");
-            break;
-        }
-    }
-
-    printf("}\n");
-}
-
-status_t OMXCodec::initNativeWindow() {
-    // Enable use of a GraphicBuffer as the output for this node.  This must
-    // happen before getting the IndexParamPortDefinition parameter because it
-    // will affect the pixel format that the node reports.
-    status_t err = mOMX->enableGraphicBuffers(mNode, kPortIndexOutput, OMX_TRUE);
-    if (err != 0) {
-        return err;
-    }
-
-    return OK;
-}
-
-void OMXCodec::initNativeWindowCrop() {
-    int32_t left, top, right, bottom;
-
-    CHECK(mOutputFormat->findRect(
-                        kKeyCropRect,
-                        &left, &top, &right, &bottom));
-
-    android_native_rect_t crop;
-    crop.left = left;
-    crop.top = top;
-    crop.right = right + 1;
-    crop.bottom = bottom + 1;
-
-    // We'll ignore any errors here; if the surface is
-    // already invalid, we'll know soon enough.
-    native_window_set_crop(mNativeWindow.get(), &crop);
-}
-
-void OMXCodec::initOutputFormat(const sp<MetaData> &inputFormat) {
-    mOutputFormat = new MetaData;
-    mOutputFormat->setCString(kKeyDecoderComponent, mComponentName);
-    if (mIsEncoder) {
-        int32_t timeScale;
-        if (inputFormat->findInt32(kKeyTimeScale, &timeScale)) {
-            mOutputFormat->setInt32(kKeyTimeScale, timeScale);
-        }
-    }
-
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = kPortIndexOutput;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-    CHECK_EQ(err, (status_t)OK);
-
-    switch (def.eDomain) {
-        case OMX_PortDomainImage:
-        {
-            OMX_IMAGE_PORTDEFINITIONTYPE *imageDef = &def.format.image;
-            CHECK_EQ((int)imageDef->eCompressionFormat,
-                     (int)OMX_IMAGE_CodingUnused);
-
-            mOutputFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
-            mOutputFormat->setInt32(kKeyColorFormat, imageDef->eColorFormat);
-            mOutputFormat->setInt32(kKeyWidth, imageDef->nFrameWidth);
-            mOutputFormat->setInt32(kKeyHeight, imageDef->nFrameHeight);
-            break;
-        }
-
-        case OMX_PortDomainAudio:
-        {
-            OMX_AUDIO_PORTDEFINITIONTYPE *audio_def = &def.format.audio;
-
-            if (audio_def->eEncoding == OMX_AUDIO_CodingPCM) {
-                OMX_AUDIO_PARAM_PCMMODETYPE params;
-                InitOMXParams(&params);
-                params.nPortIndex = kPortIndexOutput;
-
-                err = mOMX->getParameter(
-                        mNode, OMX_IndexParamAudioPcm, &params, sizeof(params));
-                CHECK_EQ(err, (status_t)OK);
-
-                CHECK_EQ((int)params.eNumData, (int)OMX_NumericalDataSigned);
-                CHECK_EQ(params.nBitPerSample, 16u);
-                CHECK_EQ((int)params.ePCMMode, (int)OMX_AUDIO_PCMModeLinear);
-
-                int32_t numChannels, sampleRate;
-                inputFormat->findInt32(kKeyChannelCount, &numChannels);
-                inputFormat->findInt32(kKeySampleRate, &sampleRate);
-
-                if ((OMX_U32)numChannels != params.nChannels) {
-                    ALOGV("Codec outputs a different number of channels than "
-                         "the input stream contains (contains %d channels, "
-                         "codec outputs %u channels).",
-                         numChannels, params.nChannels);
-                }
-
-                if (sampleRate != (int32_t)params.nSamplingRate) {
-                    ALOGV("Codec outputs at different sampling rate than "
-                         "what the input stream contains (contains data at "
-                         "%d Hz, codec outputs %u Hz)",
-                         sampleRate, params.nSamplingRate);
-                }
-
-                mOutputFormat->setCString(
-                        kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
-
-                // Use the codec-advertised number of channels, as some
-                // codecs appear to output stereo even if the input data is
-                // mono. If we know the codec lies about this information,
-                // use the actual number of channels instead.
-                mOutputFormat->setInt32(
-                        kKeyChannelCount,
-                        (mQuirks & kDecoderLiesAboutNumberOfChannels)
-                            ? numChannels : params.nChannels);
-
-                mOutputFormat->setInt32(kKeySampleRate, params.nSamplingRate);
-            } else if (audio_def->eEncoding == OMX_AUDIO_CodingAMR) {
-                OMX_AUDIO_PARAM_AMRTYPE amr;
-                InitOMXParams(&amr);
-                amr.nPortIndex = kPortIndexOutput;
-
-                err = mOMX->getParameter(
-                        mNode, OMX_IndexParamAudioAmr, &amr, sizeof(amr));
-                CHECK_EQ(err, (status_t)OK);
-
-                CHECK_EQ(amr.nChannels, 1u);
-                mOutputFormat->setInt32(kKeyChannelCount, 1);
-
-                if (amr.eAMRBandMode >= OMX_AUDIO_AMRBandModeNB0
-                    && amr.eAMRBandMode <= OMX_AUDIO_AMRBandModeNB7) {
-                    mOutputFormat->setCString(
-                            kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AMR_NB);
-                    mOutputFormat->setInt32(kKeySampleRate, 8000);
-                } else if (amr.eAMRBandMode >= OMX_AUDIO_AMRBandModeWB0
-                            && amr.eAMRBandMode <= OMX_AUDIO_AMRBandModeWB8) {
-                    mOutputFormat->setCString(
-                            kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AMR_WB);
-                    mOutputFormat->setInt32(kKeySampleRate, 16000);
-                } else {
-                    CHECK(!"Unknown AMR band mode.");
-                }
-            } else if (audio_def->eEncoding == OMX_AUDIO_CodingAAC) {
-                mOutputFormat->setCString(
-                        kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
-                int32_t numChannels, sampleRate, bitRate;
-                inputFormat->findInt32(kKeyChannelCount, &numChannels);
-                inputFormat->findInt32(kKeySampleRate, &sampleRate);
-                inputFormat->findInt32(kKeyBitRate, &bitRate);
-                mOutputFormat->setInt32(kKeyChannelCount, numChannels);
-                mOutputFormat->setInt32(kKeySampleRate, sampleRate);
-                mOutputFormat->setInt32(kKeyBitRate, bitRate);
-            } else if (audio_def->eEncoding ==
-                    (OMX_AUDIO_CODINGTYPE)OMX_AUDIO_CodingAndroidAC3) {
-                mOutputFormat->setCString(
-                        kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
-                int32_t numChannels, sampleRate, bitRate;
-                inputFormat->findInt32(kKeyChannelCount, &numChannels);
-                inputFormat->findInt32(kKeySampleRate, &sampleRate);
-                inputFormat->findInt32(kKeyBitRate, &bitRate);
-                mOutputFormat->setInt32(kKeyChannelCount, numChannels);
-                mOutputFormat->setInt32(kKeySampleRate, sampleRate);
-                mOutputFormat->setInt32(kKeyBitRate, bitRate);
-            } else {
-                CHECK(!"Should not be here. Unknown audio encoding.");
-            }
-            break;
-        }
-
-        case OMX_PortDomainVideo:
-        {
-            OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
-
-            if (video_def->eCompressionFormat == OMX_VIDEO_CodingUnused) {
-                mOutputFormat->setCString(
-                        kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
-            } else if (video_def->eCompressionFormat == OMX_VIDEO_CodingMPEG4) {
-                mOutputFormat->setCString(
-                        kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
-            } else if (video_def->eCompressionFormat == OMX_VIDEO_CodingH263) {
-                mOutputFormat->setCString(
-                        kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
-            } else if (video_def->eCompressionFormat == OMX_VIDEO_CodingAVC) {
-                mOutputFormat->setCString(
-                        kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
-            } else {
-                CHECK(!"Unknown compression format.");
-            }
-
-            mOutputFormat->setInt32(kKeyWidth, video_def->nFrameWidth);
-            mOutputFormat->setInt32(kKeyHeight, video_def->nFrameHeight);
-            mOutputFormat->setInt32(kKeyColorFormat, video_def->eColorFormat);
-
-            if (!mIsEncoder) {
-                OMX_CONFIG_RECTTYPE rect;
-                InitOMXParams(&rect);
-                rect.nPortIndex = kPortIndexOutput;
-                status_t err =
-                        mOMX->getConfig(
-                            mNode, OMX_IndexConfigCommonOutputCrop,
-                            &rect, sizeof(rect));
-
-                CODEC_LOGI("video dimensions are %u x %u",
-                        video_def->nFrameWidth, video_def->nFrameHeight);
-
-                if (err == OK) {
-                    CHECK_GE(rect.nLeft, 0);
-                    CHECK_GE(rect.nTop, 0);
-                    CHECK_GE(rect.nWidth, 0u);
-                    CHECK_GE(rect.nHeight, 0u);
-                    CHECK_LE(rect.nLeft + rect.nWidth - 1, video_def->nFrameWidth);
-                    CHECK_LE(rect.nTop + rect.nHeight - 1, video_def->nFrameHeight);
-
-                    mOutputFormat->setRect(
-                            kKeyCropRect,
-                            rect.nLeft,
-                            rect.nTop,
-                            rect.nLeft + rect.nWidth - 1,
-                            rect.nTop + rect.nHeight - 1);
-
-                    CODEC_LOGI("Crop rect is %u x %u @ (%d, %d)",
-                            rect.nWidth, rect.nHeight, rect.nLeft, rect.nTop);
-                } else {
-                    mOutputFormat->setRect(
-                            kKeyCropRect,
-                            0, 0,
-                            video_def->nFrameWidth - 1,
-                            video_def->nFrameHeight - 1);
-                }
-
-                if (mNativeWindow != NULL) {
-                     initNativeWindowCrop();
-                }
-            }
-            break;
-        }
-
-        default:
-        {
-            CHECK(!"should not be here, neither audio nor video.");
-            break;
-        }
-    }
-
-    // If the input format contains rotation information, flag the output
-    // format accordingly.
-
-    int32_t rotationDegrees;
-    if (mSource->getFormat()->findInt32(kKeyRotation, &rotationDegrees)) {
-        mOutputFormat->setInt32(kKeyRotation, rotationDegrees);
-    }
-}
-
-status_t OMXCodec::pause() {
-    Mutex::Autolock autoLock(mLock);
-
-    mPaused = true;
-
-    return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-status_t QueryCodecs(
-        const sp<IOMX> &omx,
-        const char *mime, bool queryDecoders, bool hwCodecOnly,
-        Vector<CodecCapabilities> *results) {
-    Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs;
-    results->clear();
-
-    OMXCodec::findMatchingCodecs(mime,
-            !queryDecoders /*createEncoder*/,
-            NULL /*matchComponentName*/,
-            hwCodecOnly ? OMXCodec::kHardwareCodecsOnly : 0 /*flags*/,
-            &matchingCodecs);
-
-    for (size_t c = 0; c < matchingCodecs.size(); c++) {
-        const char *componentName = matchingCodecs.itemAt(c).mName.string();
-
-        results->push();
-        CodecCapabilities *caps = &results->editItemAt(results->size() - 1);
-
-        status_t err =
-            QueryCodec(omx, componentName, mime, !queryDecoders, caps);
-
-        if (err != OK) {
-            results->removeAt(results->size() - 1);
-        }
-    }
-
-    return OK;
-}
-
-status_t QueryCodec(
-        const sp<IOMX> &omx,
-        const char *componentName, const char *mime,
-        bool isEncoder,
-        CodecCapabilities *caps) {
-    bool isVideo = !strncasecmp(mime, "video/", 6);
-
-    sp<OMXCodecObserver> observer = new OMXCodecObserver;
-    IOMX::node_id node;
-    status_t err = omx->allocateNode(componentName, observer, &node);
-
-    if (err != OK) {
-        return err;
-    }
-
-    OMXCodec::setComponentRole(omx, node, isEncoder, mime);
-
-    caps->mFlags = 0;
-    caps->mComponentName = componentName;
-
-    // NOTE: OMX does not provide a way to query AAC profile support
-    if (isVideo) {
-        OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
-        InitOMXParams(&param);
-
-        param.nPortIndex = !isEncoder ? 0 : 1;
-
-        for (param.nProfileIndex = 0;; ++param.nProfileIndex) {
-            err = omx->getParameter(
-                    node, OMX_IndexParamVideoProfileLevelQuerySupported,
-                    &param, sizeof(param));
-
-            if (err != OK) {
-                break;
-            }
-
-            CodecProfileLevel profileLevel;
-            profileLevel.mProfile = param.eProfile;
-            profileLevel.mLevel = param.eLevel;
-
-            caps->mProfileLevels.push(profileLevel);
-        }
-
-        // Color format query:
-        // return color formats in the order reported by the OMX component,
-        // prefixing standard formats that have a flexible variant with
-        // their flexible equivalent.
-        OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
-        InitOMXParams(&portFormat);
-        portFormat.nPortIndex = !isEncoder ? 1 : 0;
-        for (portFormat.nIndex = 0;; ++portFormat.nIndex)  {
-            err = omx->getParameter(
-                    node, OMX_IndexParamVideoPortFormat,
-                    &portFormat, sizeof(portFormat));
-            if (err != OK) {
-                break;
-            }
-
-            OMX_U32 flexibleEquivalent;
-            if (ACodec::isFlexibleColorFormat(
-                        omx, node, portFormat.eColorFormat, false /* usingNativeWindow */,
-                        &flexibleEquivalent)) {
-                bool marked = false;
-                for (size_t i = 0; i < caps->mColorFormats.size(); i++) {
-                    if (caps->mColorFormats.itemAt(i) == flexibleEquivalent) {
-                        marked = true;
-                        break;
-                    }
-                }
-                if (!marked) {
-                    caps->mColorFormats.push(flexibleEquivalent);
-                }
-            }
-            caps->mColorFormats.push(portFormat.eColorFormat);
-        }
-    }
-
-    if (isVideo && !isEncoder) {
-        if (omx->storeMetaDataInBuffers(
-                    node, 1 /* port index */, OMX_TRUE) == OK ||
-            omx->prepareForAdaptivePlayback(
-                    node, 1 /* port index */, OMX_TRUE,
-                    1280 /* width */, 720 /* height */) == OK) {
-            caps->mFlags |= CodecCapabilities::kFlagSupportsAdaptivePlayback;
-        }
-    }
-
-    CHECK_EQ(omx->freeNode(node), (status_t)OK);
-
-    return OK;
-}
-
-status_t QueryCodecs(
-        const sp<IOMX> &omx,
-        const char *mimeType, bool queryDecoders,
-        Vector<CodecCapabilities> *results) {
-    return QueryCodecs(omx, mimeType, queryDecoders, false /*hwCodecOnly*/, results);
-}
-
-// These are supposed to be equivalent to the logic in
-// "audio_channel_out_mask_from_count".
-status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]) {
-    switch (numChannels) {
-        case 1:
-            map[0] = OMX_AUDIO_ChannelCF;
-            break;
-        case 2:
-            map[0] = OMX_AUDIO_ChannelLF;
-            map[1] = OMX_AUDIO_ChannelRF;
-            break;
-        case 3:
-            map[0] = OMX_AUDIO_ChannelLF;
-            map[1] = OMX_AUDIO_ChannelRF;
-            map[2] = OMX_AUDIO_ChannelCF;
-            break;
-        case 4:
-            map[0] = OMX_AUDIO_ChannelLF;
-            map[1] = OMX_AUDIO_ChannelRF;
-            map[2] = OMX_AUDIO_ChannelLR;
-            map[3] = OMX_AUDIO_ChannelRR;
-            break;
-        case 5:
-            map[0] = OMX_AUDIO_ChannelLF;
-            map[1] = OMX_AUDIO_ChannelRF;
-            map[2] = OMX_AUDIO_ChannelCF;
-            map[3] = OMX_AUDIO_ChannelLR;
-            map[4] = OMX_AUDIO_ChannelRR;
-            break;
-        case 6:
-            map[0] = OMX_AUDIO_ChannelLF;
-            map[1] = OMX_AUDIO_ChannelRF;
-            map[2] = OMX_AUDIO_ChannelCF;
-            map[3] = OMX_AUDIO_ChannelLFE;
-            map[4] = OMX_AUDIO_ChannelLR;
-            map[5] = OMX_AUDIO_ChannelRR;
-            break;
-        case 7:
-            map[0] = OMX_AUDIO_ChannelLF;
-            map[1] = OMX_AUDIO_ChannelRF;
-            map[2] = OMX_AUDIO_ChannelCF;
-            map[3] = OMX_AUDIO_ChannelLFE;
-            map[4] = OMX_AUDIO_ChannelLR;
-            map[5] = OMX_AUDIO_ChannelRR;
-            map[6] = OMX_AUDIO_ChannelCS;
-            break;
-        case 8:
-            map[0] = OMX_AUDIO_ChannelLF;
-            map[1] = OMX_AUDIO_ChannelRF;
-            map[2] = OMX_AUDIO_ChannelCF;
-            map[3] = OMX_AUDIO_ChannelLFE;
-            map[4] = OMX_AUDIO_ChannelLR;
-            map[5] = OMX_AUDIO_ChannelRR;
-            map[6] = OMX_AUDIO_ChannelLS;
-            map[7] = OMX_AUDIO_ChannelRS;
-            break;
-        default:
-            return -EINVAL;
-    }
-
-    return OK;
-}
-
-}  // namespace android
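
The removed getOMXChannelMapping() is restated below as a lookup table to make
the positional layouts easier to audit against
audio_channel_out_mask_from_count(). This is an illustrative sketch only; the
enumerators stand in for the OMX_AUDIO_Channel* constants.

    #include <cstddef>

    enum Channel { LF, RF, CF, LFE, LR, RR, CS, LS, RS };

    // The layout for n channels is kLayouts[n - 1].
    static const Channel kLayouts[8][8] = {
        { CF },
        { LF, RF },
        { LF, RF, CF },
        { LF, RF, LR, RR },
        { LF, RF, CF, LR, RR },
        { LF, RF, CF, LFE, LR, RR },
        { LF, RF, CF, LFE, LR, RR, CS },
        { LF, RF, CF, LFE, LR, RR, LS, RS },
    };

    bool getChannelMapping(size_t numChannels, Channel map[]) {
        if (numChannels < 1 || numChannels > 8) return false;  // -EINVAL upstream
        for (size_t i = 0; i < numChannels; ++i) {
            map[i] = kLayouts[numChannels - 1][i];
        }
        return true;
    }
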
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 578171f..37e8e9c 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -179,6 +179,9 @@
 
 protected:
     virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const {
+        if (granulePos > INT64_MAX / 1000000ll) {
+            return INT64_MAX;
+        }
         return granulePos * 1000000ll / mVi.rate;
     }
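
The guard added above clamps before multiplying so that
granulePos * 1000000ll cannot overflow int64_t. The same check-then-scale
pattern as a standalone sketch (assumes rate > 0):

    #include <cstdint>

    int64_t samplesToUs(uint64_t samples, int64_t rate) {
        if (samples > (uint64_t)INT64_MAX / 1000000ull) {
            return INT64_MAX;  // saturate instead of overflowing
        }
        return (int64_t)(samples * 1000000ull) / rate;
    }
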
 
@@ -710,6 +713,7 @@
                     packetSize);
 
             if (n < (ssize_t)packetSize) {
+                buffer->release();
                 ALOGV("failed to read %zu bytes at %#016llx, got %zd bytes",
                         packetSize, (long long)dataOffset, n);
                 return ERROR_IO;
@@ -771,8 +775,13 @@
             return n < 0 ? n : (status_t)ERROR_END_OF_STREAM;
         }
 
-        mCurrentPageSamples =
-            mCurrentPage.mGranulePosition - mPrevGranulePosition;
+        // Clamp to 0 to avoid an unsigned wrap-around (otherwise harmless)
+        // when the granule position moves backwards
+        if (mCurrentPage.mGranulePosition >= mPrevGranulePosition) {
+            mCurrentPageSamples =
+                    mCurrentPage.mGranulePosition - mPrevGranulePosition;
+        } else {
+            mCurrentPageSamples = 0;
+        }
         mFirstPacketInPage = true;
 
         mPrevGranulePosition = mCurrentPage.mGranulePosition;
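
Granule positions should be monotonic, but a corrupt or hostile stream can
make them run backwards, in which case the old unsigned subtraction wrapped to
a huge value. The clamp above, isolated as a sketch:

    #include <cstdint>

    uint64_t pageSamples(uint64_t current, uint64_t previous) {
        // A backwards granule position yields zero samples rather than a
        // wrapped-around count.
        return current >= previous ? current - previous : 0;
    }
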
@@ -917,6 +926,9 @@
     if (granulePos > mCodecDelay) {
         pcmSamplePosition = granulePos - mCodecDelay;
     }
+    if (pcmSamplePosition > INT64_MAX / 1000000ll) {
+        return INT64_MAX;
+    }
     return pcmSamplePosition * 1000000ll / kOpusSampleRate;
 }
 
@@ -954,7 +966,7 @@
     mMeta->setInt32(kKeyChannelCount, mChannelCount);
     mMeta->setInt64(kKeyOpusSeekPreRoll /* ns */, kOpusSeekPreRollUs * 1000 /* = 80 ms*/);
     mMeta->setInt64(kKeyOpusCodecDelay /* ns */,
-            mCodecDelay /* sample/s */ * 1000000000 / kOpusSampleRate);
+            mCodecDelay /* sample/s */ * 1000000000ll / kOpusSampleRate);
 
     return OK;
 }
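
Two fixes land in the Opus path above: the pcmSamplePosition * 1000000ll
product is saturated at INT64_MAX, and the codec-delay conversion gains an
"ll" suffix so the multiply is carried out in 64-bit arithmetic. Combined into
one sketch (kOpusSampleRate is 48000 in this extractor; codecDelay is in
samples):

    #include <cstdint>

    int64_t opusGranuleToTimeUs(uint64_t granulePos, uint64_t codecDelay) {
        uint64_t pcm = granulePos > codecDelay ? granulePos - codecDelay : 0;
        if (pcm > (uint64_t)INT64_MAX / 1000000ull) {
            return INT64_MAX;  // saturate rather than overflow
        }
        return (int64_t)(pcm * 1000000ull) / 48000;
    }
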
@@ -1254,17 +1266,17 @@
         return;
     }
 
-    descLen = U32_AT(&flac[8 + typeLen]);
+    if (flacSize < 32 || flacSize - 32 < typeLen) {
+        return;
+    }
 
-    if (flacSize < 32 ||
-        flacSize - 32 < typeLen ||
-        flacSize - 32 - typeLen < descLen) {
+    descLen = U32_AT(&flac[8 + typeLen]);
+    if (flacSize - 32 - typeLen < descLen) {
         return;
     }
 
     dataLen = U32_AT(&flac[8 + typeLen + 4 + descLen + 16]);
 
-
     // we've already checked above that (flacSize - 32 - typeLen - descLen) >= 0
     if (flacSize - 32 - typeLen - descLen < dataLen) {
         return;
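
The reordering above matters: descLen is now read via U32_AT(&flac[8 +
typeLen]) only after typeLen itself has been bounds-checked, so the index can
no longer point outside the buffer. A generalized validate-before-read helper
(hypothetical, for illustration):

    #include <cstdint>
    #include <cstddef>

    // Read a big-endian u32 only after proving the 4-byte field lies
    // entirely inside the buffer.
    bool readU32Checked(const uint8_t *buf, size_t size, size_t off,
                        uint32_t *out) {
        if (off > size || size - off < 4) return false;
        *out = (uint32_t)buf[off] << 24 | (uint32_t)buf[off + 1] << 16
                | (uint32_t)buf[off + 2] << 8 | buf[off + 3];
        return true;
    }
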
@@ -1314,7 +1326,7 @@
     return mInitCheck != OK ? 0 : 1;
 }
 
-sp<MediaSource> OggExtractor::getTrack(size_t index) {
+sp<IMediaSource> OggExtractor::getTrack(size_t index) {
     if (index >= 1) {
         return NULL;
     }
diff --git a/media/libstagefright/ProcessInfo.cpp b/media/libstagefright/ProcessInfo.cpp
index b4172b3..27f1a79 100644
--- a/media/libstagefright/ProcessInfo.cpp
+++ b/media/libstagefright/ProcessInfo.cpp
@@ -20,6 +20,7 @@
 
 #include <media/stagefright/ProcessInfo.h>
 
+#include <binder/IPCThreadState.h>
 #include <binder/IProcessInfoService.h>
 #include <binder/IServiceManager.h>
 
@@ -32,22 +33,32 @@
     sp<IProcessInfoService> service = interface_cast<IProcessInfoService>(binder);
 
     size_t length = 1;
-    int32_t states;
-    status_t err = service->getProcessStatesFromPids(length, &pid, &states);
+    int32_t state;
+    static const int32_t INVALID_ADJ = -10000;
+    static const int32_t NATIVE_ADJ = -1000;
+    int32_t score = INVALID_ADJ;
+    status_t err = service->getProcessStatesAndOomScoresFromPids(length, &pid, &state, &score);
     if (err != OK) {
-        ALOGE("getProcessStatesFromPids failed");
+        ALOGE("getProcessStatesAndOomScoresFromPids failed");
         return false;
     }
-    ALOGV("pid %d states %d", pid, states);
-    if (states < 0) {
+    ALOGV("pid %d state %d score %d", pid, state, score);
+    if (score <= NATIVE_ADJ) {
+        ALOGE("pid %d invalid OOM adjustments value %d", pid, score);
         return false;
     }
 
-    // Use process state as the priority. Lower the value, higher the priority.
-    *priority = states;
+    // Use the OOM adjustment value as the priority. The lower the value,
+    // the higher the priority.
+    *priority = score;
     return true;
 }
 
+bool ProcessInfo::isValidPid(int pid) {
+    int callingPid = IPCThreadState::self()->getCallingPid();
+    // Trust the pid if the call comes from within this process; otherwise
+    // it has to match the calling pid.
+    return (callingPid == getpid()) || (callingPid == pid);
+}
+
 ProcessInfo::~ProcessInfo() {}
 
 }  // namespace android
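
The priority source changes here from the process state to the OOM adjustment
score, and scores at or below NATIVE_ADJ (-1000, which appears to mark native
processes the activity manager does not score) are rejected as invalid. A
hypothetical caller-side sketch of the new isValidPid() policy:

    #include <unistd.h>
    #include <binder/IPCThreadState.h>

    // In-process callers are trusted; remote binder callers may only name
    // their own pid.
    bool callerMayActOnPid(int pid) {
        int callingPid = android::IPCThreadState::self()->getCallingPid();
        return callingPid == getpid() || callingPid == pid;
    }
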
diff --git a/media/libstagefright/SampleIterator.cpp b/media/libstagefright/SampleIterator.cpp
index c5f359e..ad7b6fd 100644
--- a/media/libstagefright/SampleIterator.cpp
+++ b/media/libstagefright/SampleIterator.cpp
@@ -171,6 +171,13 @@
         if (mSampleToChunkIndex + 1 < mTable->mNumSampleToChunkOffsets) {
             mStopChunk = entry[1].startChunk;
 
+            if (mStopChunk < mFirstChunk ||
+                (mStopChunk - mFirstChunk) > UINT32_MAX / mSamplesPerChunk ||
+                ((mStopChunk - mFirstChunk) * mSamplesPerChunk >
+                 UINT32_MAX - mFirstChunkSampleIndex)) {
+
+                return ERROR_OUT_OF_RANGE;
+            }
             mStopChunkSampleIndex =
                 mFirstChunkSampleIndex
                     + (mStopChunk - mFirstChunk) * mSamplesPerChunk;
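
The new guard proves that mFirstChunkSampleIndex +
(mStopChunk - mFirstChunk) * mSamplesPerChunk fits in uint32_t (and that
mStopChunk does not precede mFirstChunk) before computing it. The same checked
multiply-add, as a reusable sketch:

    #include <cstdint>

    // Compute a + b * c in uint32_t, failing instead of wrapping.
    bool mulAddU32(uint32_t a, uint32_t b, uint32_t c, uint32_t *out) {
        if (c != 0 && b > UINT32_MAX / c) return false;  // b * c wraps
        uint32_t bc = b * c;
        if (bc > UINT32_MAX - a) return false;           // a + bc wraps
        *out = a + bc;
        return true;
    }
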
@@ -313,7 +320,18 @@
 
     *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
 
-    *time += mTable->getCompositionTimeOffset(sampleIndex);
+    int32_t offset = mTable->getCompositionTimeOffset(sampleIndex);
+    if ((offset < 0 && *time < (offset == INT32_MIN ?
+            INT32_MAX : uint32_t(-offset))) ||
+            (offset > 0 && *time > UINT32_MAX - offset)) {
+        ALOGE("%u + %d would overflow", *time, offset);
+        return ERROR_OUT_OF_RANGE;
+    }
+    if (offset > 0) {
+        *time += offset;
+    } else {
+        *time -= (offset == INT32_MIN ? INT32_MAX : (-offset));
+    }
 
     *duration = mTTSDuration;
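
Composition offsets are now signed, so applying one to the unsigned *time
needs both an underflow and an overflow check, plus a special case for
INT32_MIN, whose magnitude is not representable as a positive int32_t (the
patch substitutes INT32_MAX). The arithmetic above, isolated:

    #include <cstdint>

    bool applyOffset(uint32_t time, int32_t offset, uint32_t *out) {
        uint32_t mag = offset == INT32_MIN
                ? (uint32_t)INT32_MAX
                : (uint32_t)(offset < 0 ? -offset : offset);
        if (offset < 0) {
            if (time < mag) return false;               // would underflow
            *out = time - mag;
        } else {
            if (time > UINT32_MAX - mag) return false;  // would overflow
            *out = time + mag;
        }
        return true;
    }
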
 
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 11e49c8..c9c49f5 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -49,14 +49,14 @@
     CompositionDeltaLookup();
 
     void setEntries(
-            const uint32_t *deltaEntries, size_t numDeltaEntries);
+            const int32_t *deltaEntries, size_t numDeltaEntries);
 
-    uint32_t getCompositionTimeOffset(uint32_t sampleIndex);
+    int32_t getCompositionTimeOffset(uint32_t sampleIndex);
 
 private:
     Mutex mLock;
 
-    const uint32_t *mDeltaEntries;
+    const int32_t *mDeltaEntries;
     size_t mNumDeltaEntries;
 
     size_t mCurrentDeltaEntry;
@@ -73,7 +73,7 @@
 }
 
 void SampleTable::CompositionDeltaLookup::setEntries(
-        const uint32_t *deltaEntries, size_t numDeltaEntries) {
+        const int32_t *deltaEntries, size_t numDeltaEntries) {
     Mutex::Autolock autolock(mLock);
 
     mDeltaEntries = deltaEntries;
@@ -82,7 +82,7 @@
     mCurrentEntrySampleIndex = 0;
 }
 
-uint32_t SampleTable::CompositionDeltaLookup::getCompositionTimeOffset(
+int32_t SampleTable::CompositionDeltaLookup::getCompositionTimeOffset(
         uint32_t sampleIndex) {
     Mutex::Autolock autolock(mLock);
 
@@ -196,11 +196,11 @@
     mNumChunkOffsets = U32_AT(&header[4]);
 
     if (mChunkOffsetType == kChunkOffsetType32) {
-        if (data_size < 8 + mNumChunkOffsets * 4) {
+        if ((data_size - 8) / 4 < mNumChunkOffsets) {
             return ERROR_MALFORMED;
         }
     } else {
-        if (data_size < 8 + mNumChunkOffsets * 8) {
+        if ((data_size - 8) / 8 < mNumChunkOffsets) {
             return ERROR_MALFORMED;
         }
     }
@@ -233,7 +233,7 @@
 
     mNumSampleToChunkOffsets = U32_AT(&header[4]);
 
-    if (data_size < 8 + mNumSampleToChunkOffsets * 12) {
+    if ((data_size - 8) / 12 < mNumSampleToChunkOffsets) {
         return ERROR_MALFORMED;
     }
 
@@ -277,6 +277,11 @@
 
     for (uint32_t i = 0; i < mNumSampleToChunkOffsets; ++i) {
         uint8_t buffer[12];
+
+        if ((off64_t)(SIZE_MAX - 8 - (i * 12)) < mSampleToChunkOffset) {
+            return ERROR_MALFORMED;
+        }
+
         if (mDataSource->readAt(
                     mSampleToChunkOffset + 8 + i * 12, buffer, sizeof(buffer))
                 != (ssize_t)sizeof(buffer)) {
@@ -428,6 +433,10 @@
     return OK;
 }
 
+// NOTE: per ISO/IEC 14496-12, a version 0 ctts box contains unsigned values,
+// while version 1 contains signed values; however, some software creates
+// version 0 files that contain signed values, so we always treat the values
+// as signed, regardless of version.
 status_t SampleTable::setCompositionTimeToSampleParams(
         off64_t data_offset, size_t data_size) {
     ALOGI("There are reordered frames present.");
@@ -443,19 +452,23 @@
         return ERROR_IO;
     }
 
-    if (U32_AT(header) != 0) {
-        // Expected version = 0, flags = 0.
+    uint32_t flags = U32_AT(header);
+    uint32_t version = flags >> 24;
+    flags &= 0xffffff;
+
+    if ((version != 0 && version != 1) || flags != 0) {
+        // Expected version = 0 or 1, flags = 0.
         return ERROR_MALFORMED;
     }
 
     size_t numEntries = U32_AT(&header[4]);
 
-    if (data_size != (numEntries + 1) * 8) {
+    if (((SIZE_MAX / 8) - 1 < numEntries) || (data_size != (numEntries + 1) * 8)) {
         return ERROR_MALFORMED;
     }
 
     mNumCompositionTimeDeltaEntries = numEntries;
-    uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(uint32_t);
+    uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(int32_t);
     if (allocSize > SIZE_MAX) {
         ALOGE("Composition-time-to-sample table size too large.");
         return ERROR_OUT_OF_RANGE;
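
The header parsing above now accepts both ctts versions by splitting the first
word into a version byte and 24 flag bits, which is the standard ISO/IEC
14496-12 FullBox layout:

    #include <cstdint>

    struct FullBoxHeader { uint32_t version; uint32_t flags; };

    FullBoxHeader parseFullBox(uint32_t firstWord) {
        FullBoxHeader h;
        h.version = firstWord >> 24;       // top byte
        h.flags = firstWord & 0xffffff;    // low 24 bits
        return h;
    }
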
@@ -473,7 +486,7 @@
         return ERROR_OUT_OF_RANGE;
     }
 
-    mCompositionTimeDeltaEntries = new (std::nothrow) uint32_t[2 * numEntries];
+    mCompositionTimeDeltaEntries = new (std::nothrow) int32_t[2 * numEntries];
     if (!mCompositionTimeDeltaEntries) {
         ALOGE("Cannot allocate composition-time-to-sample table with %llu "
                 "entries.", (unsigned long long)numEntries);
@@ -650,12 +663,28 @@
 
                 mSampleTimeEntries[sampleIndex].mSampleIndex = sampleIndex;
 
-                uint32_t compTimeDelta =
+                int32_t compTimeDelta =
                     mCompositionDeltaLookup->getCompositionTimeOffset(
                             sampleIndex);
 
+                if ((compTimeDelta < 0 && sampleTime <
+                        (compTimeDelta == INT32_MIN ?
+                                INT32_MAX : uint32_t(-compTimeDelta)))
+                        || (compTimeDelta > 0 &&
+                                sampleTime > UINT32_MAX - compTimeDelta)) {
+                    ALOGE("%u + %d would overflow, clamping",
+                            sampleTime, compTimeDelta);
+                    if (compTimeDelta < 0) {
+                        sampleTime = 0;
+                    } else {
+                        sampleTime = UINT32_MAX;
+                    }
+                    compTimeDelta = 0;
+                }
+
                 mSampleTimeEntries[sampleIndex].mCompositionTime =
-                    sampleTime + compTimeDelta;
+                        compTimeDelta > 0 ? sampleTime + compTimeDelta :
+                                sampleTime - (-compTimeDelta);
             }
 
             ++sampleIndex;
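
Unlike the SampleIterator path, which fails with ERROR_OUT_OF_RANGE, this
table-building path clamps and keeps going. The saturating variant of the
signed-offset arithmetic above, as a sketch:

    #include <cstdint>

    uint32_t applyOffsetSaturating(uint32_t time, int32_t offset) {
        uint32_t mag = offset == INT32_MIN
                ? (uint32_t)INT32_MAX
                : (uint32_t)(offset < 0 ? -offset : offset);
        if (offset < 0) return time < mag ? 0 : time - mag;
        return time > UINT32_MAX - mag ? UINT32_MAX : time + mag;
    }
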
@@ -945,7 +974,7 @@
     return OK;
 }
 
-uint32_t SampleTable::getCompositionTimeOffset(uint32_t sampleIndex) {
+int32_t SampleTable::getCompositionTimeOffset(uint32_t sampleIndex) {
     return mCompositionDeltaLookup->getCompositionTimeOffset(sampleIndex);
 }
 
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
new file mode 100644
index 0000000..1b44a00
--- /dev/null
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gui/Surface.h>
+
+#include <media/ICrypto.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/SimpleDecodingSource.h>
+#include <media/stagefright/Utils.h>
+
+using namespace android;
+
+const int64_t kTimeoutWaitForOutputUs = 500000; // 0.5 seconds
+const int64_t kTimeoutWaitForInputUs = 5000; // 5 milliseconds
+
+//static
+sp<SimpleDecodingSource> SimpleDecodingSource::Create(
+        const sp<IMediaSource> &source, uint32_t flags, const sp<ANativeWindow> &nativeWindow,
+        const char *desiredCodec) {
+    sp<Surface> surface = static_cast<Surface*>(nativeWindow.get());
+    const char *mime = NULL;
+    sp<MetaData> meta = source->getFormat();
+    CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+    sp<AMessage> format = new AMessage;
+    convertMetaDataToMessage(source->getFormat(), &format);
+
+    Vector<AString> matchingCodecs;
+    MediaCodecList::findMatchingCodecs(
+            mime, false /* encoder */, flags, &matchingCodecs);
+
+    sp<ALooper> looper = new ALooper;
+    looper->setName("stagefright");
+    looper->start();
+
+    sp<MediaCodec> codec;
+
+    for (size_t i = 0; i < matchingCodecs.size(); ++i) {
+        const AString &componentName = matchingCodecs[i];
+        if (desiredCodec != NULL && componentName.compare(desiredCodec)) {
+            continue;
+        }
+
+        ALOGV("Attempting to allocate codec '%s'", componentName.c_str());
+
+        codec = MediaCodec::CreateByComponentName(looper, componentName);
+        if (codec != NULL) {
+            ALOGI("Successfully allocated codec '%s'", componentName.c_str());
+
+            status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
+            if (err == OK) {
+                err = codec->getOutputFormat(&format);
+            }
+            if (err == OK) {
+                return new SimpleDecodingSource(codec, source, looper, surface != NULL, format);
+            }
+
+            ALOGD("Failed to configure codec '%s'", componentName.c_str());
+            codec->release();
+            codec = NULL;
+        }
+    }
+
+    looper->stop();
+    ALOGE("No matching decoder! (mime: %s)", mime);
+    return NULL;
+}
+
+SimpleDecodingSource::SimpleDecodingSource(
+        const sp<MediaCodec> &codec, const sp<IMediaSource> &source, const sp<ALooper> &looper,
+        bool usingSurface, const sp<AMessage> &format)
+    : mCodec(codec),
+      mSource(source),
+      mLooper(looper),
+      mUsingSurface(usingSurface),
+      mProtectedState(format) {
+    mCodec->getName(&mComponentName);
+}
+
+SimpleDecodingSource::~SimpleDecodingSource() {
+    mCodec->release();
+    mLooper->stop();
+}
+
+status_t SimpleDecodingSource::start(MetaData *params) {
+    (void)params;
+    Mutexed<ProtectedState>::Locked me(mProtectedState);
+    if (me->mState != INIT) {
+        return -EINVAL;
+    }
+    status_t res = mCodec->start();
+    if (res == OK) {
+        res = mSource->start();
+    }
+
+    if (res == OK) {
+        me->mState = STARTED;
+        me->mQueuedInputEOS = false;
+        me->mGotOutputEOS = false;
+    } else {
+        me->mState = ERROR;
+    }
+
+    return res;
+}
+
+status_t SimpleDecodingSource::stop() {
+    Mutexed<ProtectedState>::Locked me(mProtectedState);
+    if (me->mState != STARTED) {
+        return -EINVAL;
+    }
+
+    // wait for any pending reads to complete
+    me->mState = STOPPING;
+    while (me->mReading) {
+        me.waitForCondition(me->mReadCondition);
+    }
+
+    status_t res1 = mCodec->stop();
+    if (res1 != OK) {
+        mCodec->release();
+    }
+    status_t res2 = mSource->stop();
+    if (res1 == OK && res2 == OK) {
+        me->mState = STOPPED;
+    } else {
+        me->mState = ERROR;
+    }
+    return res1 != OK ? res1 : res2;
+}
+
+sp<MetaData> SimpleDecodingSource::getFormat() {
+    Mutexed<ProtectedState>::Locked me(mProtectedState);
+    if (me->mState == STARTED || me->mState == INIT) {
+        sp<MetaData> meta = new MetaData();
+        convertMessageToMetaData(me->mFormat, meta);
+        return meta;
+    }
+    return NULL;
+}
+
+SimpleDecodingSource::ProtectedState::ProtectedState(const sp<AMessage> &format)
+    : mReading(false),
+      mFormat(format),
+      mState(INIT),
+      mQueuedInputEOS(false),
+      mGotOutputEOS(false) {
+}
+
+status_t SimpleDecodingSource::read(
+        MediaBuffer **buffer, const ReadOptions *options) {
+    *buffer = NULL;
+
+    Mutexed<ProtectedState>::Locked me(mProtectedState);
+    if (me->mState != STARTED) {
+        return ERROR_END_OF_STREAM;
+    }
+    me->mReading = true;
+
+    status_t res = doRead(me, buffer, options);
+
+    me.lock();
+    me->mReading = false;
+    if (me->mState != STARTED) {
+        me->mReadCondition.signal();
+    }
+
+    return res;
+}
+
+status_t SimpleDecodingSource::doRead(
+        Mutexed<ProtectedState>::Locked &me, MediaBuffer **buffer, const ReadOptions *options) {
+    // |me| is always locked on entry, but is allowed to be unlocked on exit
+    CHECK_EQ(me->mState, STARTED);
+
+    size_t out_ix, in_ix, out_offset, out_size;
+    int64_t out_pts;
+    uint32_t out_flags;
+    status_t res;
+
+    // flush codec on seek
+    IMediaSource::ReadOptions::SeekMode mode;
+    if (options != NULL && options->getSeekTo(&out_pts, &mode)) {
+        me->mQueuedInputEOS = false;
+        me->mGotOutputEOS = false;
+        mCodec->flush();
+    }
+
+    if (me->mGotOutputEOS) {
+        return ERROR_END_OF_STREAM;
+    }
+
+    for (int retries = 0; ++retries; ) {
+        // If we fill all available input buffers, we should expect the
+        // codec to produce at least one output buffer. Also, the codec
+        // should produce an output buffer in at most 1 second. Retry a
+        // few times nonetheless.
+        while (!me->mQueuedInputEOS) {
+            // allow some time to get input buffer after flush
+            res = mCodec->dequeueInputBuffer(&in_ix, kTimeoutWaitForInputUs);
+            if (res == -EAGAIN) {
+                // no available input buffers
+                break;
+            }
+
+            sp<ABuffer> in_buffer;
+            if (res == OK) {
+                res = mCodec->getInputBuffer(in_ix, &in_buffer);
+            }
+
+            if (res != OK || in_buffer == NULL) {
+                ALOGW("[%s] could not get input buffer #%zu",
+                        mComponentName.c_str(), in_ix);
+                me->mState = ERROR;
+                return UNKNOWN_ERROR;
+            }
+
+            MediaBuffer *in_buf;
+            while (true) {
+                in_buf = NULL;
+                me.unlock();
+                res = mSource->read(&in_buf, options);
+                me.lock();
+                if (res != OK || me->mState != STARTED) {
+                    if (in_buf != NULL) {
+                        in_buf->release();
+                        in_buf = NULL;
+                    }
+
+                    // queue EOS
+                    me->mQueuedInputEOS = true;
+                    if (mCodec->queueInputBuffer(
+                                 in_ix, 0 /* offset */, 0 /* size */,
+                                 0 /* pts */, MediaCodec::BUFFER_FLAG_EOS) != OK) {
+                        ALOGI("[%s] failed to queue input EOS", mComponentName.c_str());
+                        me->mState = ERROR;
+                        return UNKNOWN_ERROR;
+                    }
+
+                    // A plain input EOS keeps draining output below; a
+                    // genuine source error is reported, and a concurrent
+                    // stop is reported as EOS.
+                    if (res != ERROR_END_OF_STREAM) {
+                        me->mState = ERROR;
+                        return res;
+                    }
+                    if (me->mState != STARTED) {
+                        return ERROR_END_OF_STREAM;
+                    }
+                    break;
+                }
+                if (in_buf == NULL) { // should not happen
+                    continue;
+                } else if (in_buf->range_length() != 0) {
+                    break;
+                }
+                in_buf->release();
+            }
+
+            if (in_buf != NULL) {
+                int64_t timestampUs = 0;
+                CHECK(in_buf->meta_data()->findInt64(kKeyTime, &timestampUs));
+                if (in_buf->range_length() > in_buffer->capacity()) {
+                    ALOGW("'%s' received %zu input bytes for buffer of size %zu",
+                            mComponentName.c_str(),
+                            in_buf->range_length(), in_buffer->capacity());
+                }
+                memcpy(in_buffer->base(), (uint8_t *)in_buf->data() + in_buf->range_offset(),
+                       min(in_buf->range_length(), in_buffer->capacity()));
+
+                res = mCodec->queueInputBuffer(
+                        in_ix, 0 /* offset */, in_buf->range_length(),
+                        timestampUs, 0 /* flags */);
+                if (res != OK) {
+                    ALOGI("[%s] failed to queue input buffer #%zu", mComponentName.c_str(), in_ix);
+                    me->mState = ERROR;
+                }
+                in_buf->release();
+            }
+        }
+
+        me.unlock();
+        res = mCodec->dequeueOutputBuffer(
+                &out_ix, &out_offset, &out_size, &out_pts,
+                &out_flags, kTimeoutWaitForOutputUs /* timeoutUs */);
+        me.lock();
+        // abort read on stop
+        if (me->mState != STARTED) {
+            if (res == OK) {
+                mCodec->releaseOutputBuffer(out_ix);
+            }
+            return ERROR_END_OF_STREAM;
+        }
+
+        if (res == -EAGAIN) {
+            ALOGD("[%s] did not produce an output buffer. retry count: %d",
+                  mComponentName.c_str(), retries);
+            continue;
+        } else if (res == INFO_FORMAT_CHANGED) {
+            if (mCodec->getOutputFormat(&me->mFormat) != OK) {
+                me->mState = ERROR;
+                res = UNKNOWN_ERROR;
+            }
+            return res;
+        } else if (res == INFO_OUTPUT_BUFFERS_CHANGED) {
+            ALOGV("output buffers changed");
+            continue;
+        } else if (res != OK) {
+            me->mState = ERROR;
+            return res;
+        }
+
+        sp<ABuffer> out_buffer;
+        res = mCodec->getOutputBuffer(out_ix, &out_buffer);
+        if (res != OK) {
+            ALOGW("[%s] could not get output buffer #%zu",
+                    mComponentName.c_str(), out_ix);
+            me->mState = ERROR;
+            return UNKNOWN_ERROR;
+        }
+        if (out_flags & MediaCodec::BUFFER_FLAG_EOS) {
+            me->mGotOutputEOS = true;
+            // return EOS immediately if last buffer is empty
+            if (out_size == 0) {
+                mCodec->releaseOutputBuffer(out_ix);
+                return ERROR_END_OF_STREAM;
+            }
+        }
+
+        if (mUsingSurface && out_size > 0) {
+            *buffer = new MediaBuffer(0);
+            mCodec->renderOutputBufferAndRelease(out_ix);
+        } else {
+            *buffer = new MediaBuffer(out_size);
+            CHECK_LE(out_buffer->size(), (*buffer)->size());
+            memcpy((*buffer)->data(), out_buffer->data(), out_buffer->size());
+            (*buffer)->meta_data()->setInt64(kKeyTime, out_pts);
+            mCodec->releaseOutputBuffer(out_ix);
+        }
+        return OK;
+    }
+
+    return TIMED_OUT;
+}
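
SimpleDecodingSource wraps a MediaCodec behind the MediaSource read()
interface for simple decode-to-buffer clients. A hypothetical usage sketch
(the extractor and error handling are illustrative, and default arguments for
Create() and read() are assumed to come from the header):

    #include <media/stagefright/MediaErrors.h>
    #include <media/stagefright/MediaExtractor.h>
    #include <media/stagefright/SimpleDecodingSource.h>

    using namespace android;

    // Decode every frame of one track, dropping the output.
    status_t drainTrack(const sp<MediaExtractor> &extractor, size_t index) {
        sp<IMediaSource> track = extractor->getTrack(index);
        sp<SimpleDecodingSource> decoder = SimpleDecodingSource::Create(
                track, 0 /* flags */, NULL /* nativeWindow */);
        if (decoder == NULL) {
            return UNKNOWN_ERROR;
        }
        status_t err = decoder->start();
        if (err != OK) {
            return err;
        }
        MediaBuffer *frame = NULL;
        while ((err = decoder->read(&frame, NULL /* options */)) == OK) {
            frame->release();  // a real consumer would use the data first
            frame = NULL;
        }
        decoder->stop();
        return err == ERROR_END_OF_STREAM ? OK : err;
    }
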
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index 1da1e5e..d30be88 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -24,21 +24,32 @@
 
 namespace android {
 
-SkipCutBuffer::SkipCutBuffer(int32_t skip, int32_t cut) {
+SkipCutBuffer::SkipCutBuffer(size_t skip, size_t cut, size_t num16BitChannels) {
 
-    if (skip < 0 || cut < 0 || cut > 64 * 1024) {
-        ALOGW("out of range skip/cut: %d/%d, using passthrough instead", skip, cut);
-        skip = 0;
-        cut = 0;
+    mWriteHead = 0;
+    mReadHead = 0;
+    mCapacity = 0;
+    mCutBuffer = NULL;
+
+    if (num16BitChannels == 0 || num16BitChannels > INT32_MAX / 2) {
+        ALOGW("# channels out of range: %zu, using passthrough instead", num16BitChannels);
+        return;
     }
+    size_t frameSize = num16BitChannels * 2;
+    if (skip > INT32_MAX / frameSize || cut > INT32_MAX / frameSize
+            || cut * frameSize > INT32_MAX - 4096) {
+        ALOGW("out of range skip/cut: %zu/%zu, using passthrough instead",
+                skip, cut);
+        return;
+    }
+    skip *= frameSize;
+    cut *= frameSize;
 
     mFrontPadding = mSkip = skip;
     mBackPadding = cut;
-    mWriteHead = 0;
-    mReadHead = 0;
     mCapacity = cut + 4096;
-    mCutBuffer = new char[mCapacity];
-    ALOGV("skipcutbuffer %d %d %d", skip, cut, mCapacity);
+    mCutBuffer = new (std::nothrow) char[mCapacity];
+    ALOGV("skipcutbuffer %zu %zu %d", skip, cut, mCapacity);
 }
 
 SkipCutBuffer::~SkipCutBuffer() {
@@ -46,6 +57,11 @@
 }
 
 void SkipCutBuffer::submit(MediaBuffer *buffer) {
+    if (mCutBuffer == NULL) {
+        // passthrough mode
+        return;
+    }
+
     int32_t offset = buffer->range_offset();
     int32_t buflen = buffer->range_length();
 
@@ -73,6 +89,11 @@
 }
 
 void SkipCutBuffer::submit(const sp<ABuffer>& buffer) {
+    if (mCutBuffer == NULL) {
+        // passthrough mode
+        return;
+    }
+
     int32_t offset = buffer->offset();
     int32_t buflen = buffer->size();
 
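
SkipCutBuffer now takes its skip/cut amounts in frames plus the number of 16-bit channels, scales them to bytes internally, and degrades to passthrough (mCutBuffer == NULL, submit() a no-op) whenever an argument is out of range or the allocation fails. A hedged usage sketch; the delay/padding figures are illustrative AAC-style values, not from this patch:

    // trim 2112 frames of encoder delay and 288 frames of trailing padding
    // from stereo 16-bit PCM output
    SkipCutBuffer *scb = new SkipCutBuffer(2112 /* skip */, 288 /* cut */, 2);
    // a zero channel count, or skip/cut whose byte size would exceed
    // INT32_MAX, silently selects passthrough mode instead of aborting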
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index e37e909..377f5fd 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -22,6 +22,7 @@
 #include <utils/Log.h>
 #include <gui/Surface.h>
 
+#include "include/avc_utils.h"
 #include "include/StagefrightMetadataRetriever.h"
 
 #include <media/ICrypto.h>
@@ -35,11 +36,12 @@
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXCodec.h>
 #include <media/stagefright/Utils.h>
 
 #include <CharacterEncodingDetector.h>
@@ -55,13 +57,14 @@
     ALOGV("StagefrightMetadataRetriever()");
 
     DataSource::RegisterDefaultSniffers();
-    CHECK_EQ(mClient.connect(), (status_t)OK);
 }
 
 StagefrightMetadataRetriever::~StagefrightMetadataRetriever() {
     ALOGV("~StagefrightMetadataRetriever()");
     clearMetadata();
-    mClient.disconnect();
+    if (mSource != NULL) {
+        mSource->close();
+    }
 }
 
 status_t StagefrightMetadataRetriever::setDataSource(
@@ -137,9 +140,9 @@
 }
 
 static VideoFrame *extractVideoFrame(
-        const char *componentName,
+        const AString &componentName,
         const sp<MetaData> &trackMeta,
-        const sp<MediaSource> &source,
+        const sp<IMediaSource> &source,
         int64_t frameTimeUs,
         int seekMode) {
 
@@ -147,6 +150,7 @@
 
     sp<AMessage> videoFormat;
     if (convertMetaDataToMessage(trackMeta, &videoFormat) != OK) {
+        ALOGE("b/23680780");
         ALOGW("Failed to convert meta data to message");
         return NULL;
     }
@@ -161,7 +165,7 @@
             looper, componentName, &err);
 
     if (decoder.get() == NULL || err != OK) {
-        ALOGW("Failed to instantiate decoder [%s]", componentName);
+        ALOGW("Failed to instantiate decoder [%s]", componentName.c_str());
         return NULL;
     }
 
@@ -215,6 +219,7 @@
     if (err != OK) {
         ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
         decoder->release();
+        source->stop();
         return NULL;
     }
 
@@ -223,6 +228,7 @@
     if (err != OK) {
         ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
         decoder->release();
+        source->stop();
         return NULL;
     }
 
@@ -232,6 +238,15 @@
     int64_t timeUs;
     size_t retriesLeft = kRetryCount;
     bool done = false;
+    const char *mime;
+    bool success = format->findCString(kKeyMIMEType, &mime);
+    if (!success) {
+        ALOGE("Could not find mime type");
+        return NULL;
+    }
+
+    bool isAvcOrHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+            || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
 
     do {
         size_t inputIndex = -1;
@@ -271,6 +286,11 @@
                 memcpy(codecBuffer->data(),
                         (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
                         mediaBuffer->range_length());
+                if (isAvcOrHevc && IsIDR(codecBuffer)) {
+                    // Only need to decode one IDR frame.
+                    haveMoreInputs = false;
+                    flags |= MediaCodec::BUFFER_FLAG_EOS;
+                }
             }
 
             mediaBuffer->release();
@@ -328,7 +348,6 @@
     if (err != OK || size <= 0 || outputFormat == NULL) {
         ALOGE("Failed to decode thumbnail frame");
         source->stop();
-        decoder->stop();
         decoder->release();
         return NULL;
     }
@@ -401,7 +420,6 @@
     videoFrameBuffer.clear();
     source->stop();
     decoder->releaseOutputBuffer(index);
-    decoder->stop();
     decoder->release();
 
     if (err != OK) {
@@ -458,7 +476,7 @@
     sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
             i, MediaExtractor::kIncludeExtensiveMetaData);
 
-    sp<MediaSource> source = mExtractor->getTrack(i);
+    sp<IMediaSource> source = mExtractor->getTrack(i);
 
     if (source.get() == NULL) {
         ALOGV("unable to instantiate video track.");
@@ -476,23 +494,22 @@
     const char *mime;
     CHECK(trackMeta->findCString(kKeyMIMEType, &mime));
 
-    Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs;
-    OMXCodec::findMatchingCodecs(
+    Vector<AString> matchingCodecs;
+    MediaCodecList::findMatchingCodecs(
             mime,
             false, /* encoder */
-            NULL, /* matchComponentName */
-            OMXCodec::kPreferSoftwareCodecs,
+            MediaCodecList::kPreferSoftwareCodecs,
             &matchingCodecs);
 
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
-        const char *componentName = matchingCodecs[i].mName.string();
+        const AString &componentName = matchingCodecs[i];
         VideoFrame *frame =
             extractVideoFrame(componentName, trackMeta, source, timeUs, option);
 
         if (frame != NULL) {
             return frame;
         }
-        ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName);
+        ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
     }
 
     return NULL;
@@ -661,9 +678,12 @@
                 }
             } else if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
                 const char *lang;
-                trackMeta->findCString(kKeyMediaLanguage, &lang);
-                timedTextLang.append(String8(lang));
-                timedTextLang.append(String8(":"));
+                if (trackMeta->findCString(kKeyMediaLanguage, &lang)) {
+                    timedTextLang.append(String8(lang));
+                    timedTextLang.append(String8(":"));
+                } else {
+                    ALOGE("No language found for timed text");
+                }
             }
         }
     }
@@ -701,7 +721,7 @@
         mMetaData.add(METADATA_KEY_BITRATE, String8(tmp));
     } else {
         off64_t sourceSize;
-        if (mSource->getSize(&sourceSize) == OK) {
+        if (mSource != NULL && mSource->getSize(&sourceSize) == OK) {
             int64_t avgBitRate = (int64_t)(sourceSize * 8E6 / maxDurationUs);
 
             sprintf(tmp, "%" PRId64, avgBitRate);
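
Decoder candidates now come from MediaCodecList rather than the removed OMXCodec helper. A minimal sketch of the call as used above; the mime constant is an example input:

    Vector<AString> candidates;
    MediaCodecList::findMatchingCodecs(
            MEDIA_MIMETYPE_VIDEO_AVC,
            false /* encoder */,
            MediaCodecList::kPreferSoftwareCodecs,
            &candidates);
    for (size_t i = 0; i < candidates.size(); ++i) {
        // try candidates[i] until one of them decodes a thumbnail frame
        ALOGV("candidate decoder: %s", candidates[i].c_str());
    }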
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index e8abf48..15ff569 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -23,6 +23,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <OMX_IVCommon.h>
+#include <media/hardware/HardwareAPI.h>
 #include <media/hardware/MetadataBufferType.h>
 
 #include <ui/GraphicBuffer.h>
@@ -126,9 +127,9 @@
     return OK;
 }
 
-bool SurfaceMediaSource::isMetaDataStoredInVideoBuffers() const {
+MetadataBufferType SurfaceMediaSource::metaDataStoredInVideoBuffers() const {
     ALOGV("isMetaDataStoredInVideoBuffers");
-    return true;
+    return kMetadataBufferTypeANWBuffer;
 }
 
 int32_t SurfaceMediaSource::getFrameRate( ) const {
@@ -250,29 +251,19 @@
 }
 
 // Pass the data to the MediaBuffer. Pass in only the metadata
-// The metadata passed consists of two parts:
-// 1. First, there is an integer indicating that it is a GRAlloc
-// source (kMetadataBufferTypeGrallocSource)
-// 2. This is followed by the buffer_handle_t that is a handle to the
-// GRalloc buffer. The encoder needs to interpret this GRalloc handle
-// and encode the frames.
-// --------------------------------------------------------------
-// |  kMetadataBufferTypeGrallocSource | sizeof(buffer_handle_t) |
-// --------------------------------------------------------------
 // Note: Call only when you have the lock
-static void passMetadataBuffer(MediaBuffer **buffer,
-        buffer_handle_t bufferHandle) {
-    *buffer = new MediaBuffer(4 + sizeof(buffer_handle_t));
-    char *data = (char *)(*buffer)->data();
+void SurfaceMediaSource::passMetadataBuffer_l(MediaBuffer **buffer,
+        ANativeWindowBuffer *bufferHandle) const {
+    *buffer = new MediaBuffer(sizeof(VideoNativeMetadata));
+    VideoNativeMetadata *data = (VideoNativeMetadata *)(*buffer)->data();
     if (data == NULL) {
         ALOGE("Cannot allocate memory for metadata buffer!");
         return;
     }
-    OMX_U32 type = kMetadataBufferTypeGrallocSource;
-    memcpy(data, &type, 4);
-    memcpy(data + 4, &bufferHandle, sizeof(buffer_handle_t));
-
-    ALOGV("handle = %p, , offset = %zu, length = %zu",
+    data->eType = metaDataStoredInVideoBuffers();
+    data->pBuffer = bufferHandle;
+    data->nFenceFd = -1;
+    ALOGV("handle = %p, offset = %zu, length = %zu",
             bufferHandle, (*buffer)->range_length(), (*buffer)->range_offset());
 }
 
@@ -308,9 +299,9 @@
 
             // First time seeing the buffer?  Add it to the SMS slot
             if (item.mGraphicBuffer != NULL) {
-                mSlots[item.mBuf].mGraphicBuffer = item.mGraphicBuffer;
+                mSlots[item.mSlot].mGraphicBuffer = item.mGraphicBuffer;
             }
-            mSlots[item.mBuf].mFrameNumber = item.mFrameNumber;
+            mSlots[item.mSlot].mFrameNumber = item.mFrameNumber;
 
             // check for the timing of this buffer
             if (mNumFramesReceived == 0 && !mUseAbsoluteTimestamps) {
@@ -320,7 +311,7 @@
                     if (item.mTimestamp < mStartTimeNs) {
                         // This frame predates start of record, discard
                         mConsumer->releaseBuffer(
-                                item.mBuf, item.mFrameNumber, EGL_NO_DISPLAY,
+                                item.mSlot, item.mFrameNumber, EGL_NO_DISPLAY,
                                 EGL_NO_SYNC_KHR, Fence::NO_FENCE);
                         continue;
                     }
@@ -346,13 +337,13 @@
         return ERROR_END_OF_STREAM;
     }
 
-    mCurrentSlot = item.mBuf;
+    mCurrentSlot = item.mSlot;
 
     // First time seeing the buffer?  Add it to the SMS slot
     if (item.mGraphicBuffer != NULL) {
-        mSlots[item.mBuf].mGraphicBuffer = item.mGraphicBuffer;
+        mSlots[item.mSlot].mGraphicBuffer = item.mGraphicBuffer;
     }
-    mSlots[item.mBuf].mFrameNumber = item.mFrameNumber;
+    mSlots[item.mSlot].mFrameNumber = item.mFrameNumber;
 
     mCurrentBuffers.push_back(mSlots[mCurrentSlot].mGraphicBuffer);
     int64_t prevTimeStamp = mCurrentTimestamp;
@@ -361,7 +352,7 @@
     mNumFramesEncoded++;
     // Pass the data to the MediaBuffer. Pass in only the metadata
 
-    passMetadataBuffer(buffer, mSlots[mCurrentSlot].mGraphicBuffer->handle);
+    passMetadataBuffer_l(buffer, mSlots[mCurrentSlot].mGraphicBuffer->getNativeBuffer());
 
     (*buffer)->setObserver(this);
     (*buffer)->add_ref();
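
The metadata payload changes from a 4-byte type tag plus raw buffer_handle_t to a VideoNativeMetadata struct carrying the type, an ANativeWindowBuffer pointer, and a fence fd. A hedged sketch of the consumer side; 'mbuf' stands in for whatever MediaBuffer the encoder receives:

    VideoNativeMetadata *meta = (VideoNativeMetadata *)mbuf->data();
    if (meta->eType == kMetadataBufferTypeANWBuffer) {
        ANativeWindowBuffer *anb = meta->pBuffer;
        if (meta->nFenceFd >= 0) {
            // wait on (and close) the fence before touching the pixels
        }
        // hand 'anb' to the encoder
    }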
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 6b62e43..568837a 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -26,8 +26,25 @@
 
 status_t setNativeWindowSizeFormatAndUsage(
         ANativeWindow *nativeWindow /* nonnull */,
-        int width, int height, int format, int rotation, int usage) {
-    status_t err = native_window_set_buffers_dimensions(nativeWindow, width, height);
+        int width, int height, int format, int rotation, int usage, bool reconnect) {
+    status_t err = NO_ERROR;
+
+    // In some cases we need to reconnect so that we can dequeue all buffers
+    if (reconnect) {
+        err = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+        if (err != NO_ERROR) {
+            ALOGE("native_window_api_disconnect failed: %s (%d)", strerror(-err), -err);
+            return err;
+        }
+
+        err = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+        if (err != NO_ERROR) {
+            ALOGE("native_window_api_connect failed: %s (%d)", strerror(-err), -err);
+            return err;
+        }
+    }
+
+    err = native_window_set_buffers_dimensions(nativeWindow, width, height);
     if (err != NO_ERROR) {
         ALOGE("native_window_set_buffers_dimensions failed: %s (%d)", strerror(-err), -err);
         return err;
@@ -55,11 +72,17 @@
         return err;
     }
 
+    int consumerUsage = 0;
+    err = nativeWindow->query(nativeWindow, NATIVE_WINDOW_CONSUMER_USAGE_BITS, &consumerUsage);
+    if (err != NO_ERROR) {
+        ALOGW("failed to get consumer usage bits. ignoring");
+        err = NO_ERROR;
+    }
+
     // Make sure to check whether either Stagefright or the video decoder
     // requested protected buffers.
     if (usage & GRALLOC_USAGE_PROTECTED) {
-        // Verify that the ANativeWindow sends images directly to
-        // SurfaceFlinger.
+        // Check if the ANativeWindow sends images directly to SurfaceFlinger.
         int queuesToNativeWindow = 0;
         err = nativeWindow->query(
                 nativeWindow, NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER, &queuesToNativeWindow);
@@ -67,19 +90,14 @@
             ALOGE("error authenticating native window: %s (%d)", strerror(-err), -err);
             return err;
         }
-        if (queuesToNativeWindow != 1) {
+
+        // Check if the ANativeWindow uses hardware protected buffers.
+        if (queuesToNativeWindow != 1 && !(consumerUsage & GRALLOC_USAGE_PROTECTED)) {
             ALOGE("native window could not be authenticated");
             return PERMISSION_DENIED;
         }
     }
 
-    int consumerUsage = 0;
-    err = nativeWindow->query(nativeWindow, NATIVE_WINDOW_CONSUMER_USAGE_BITS, &consumerUsage);
-    if (err != NO_ERROR) {
-        ALOGW("failed to get consumer usage bits. ignoring");
-        err = NO_ERROR;
-    }
-
     int finalUsage = usage | consumerUsage;
     ALOGV("gralloc usage: %#x(producer) + %#x(consumer) = %#x", usage, consumerUsage, finalUsage);
     err = native_window_set_usage(nativeWindow, finalUsage);
@@ -123,7 +141,8 @@
     }
 
     err = setNativeWindowSizeFormatAndUsage(
-            nativeWindow, 1, 1, HAL_PIXEL_FORMAT_RGBX_8888, 0, GRALLOC_USAGE_SW_WRITE_OFTEN);
+            nativeWindow, 1, 1, HAL_PIXEL_FORMAT_RGBX_8888, 0, GRALLOC_USAGE_SW_WRITE_OFTEN,
+            false /* reconnect */);
     if (err != NO_ERROR) {
         goto error;
     }
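
A hedged caller-side sketch of the extended helper; the window, size, format, and usage values are example inputs, not from this patch:

    // reconnect = true cycles the NATIVE_WINDOW_API_MEDIA connection first,
    // so that every buffer can be dequeued afterwards
    status_t err = setNativeWindowSizeFormatAndUsage(
            nativeWindow, 1280, 720, HAL_PIXEL_FORMAT_YV12,
            0 /* rotation */, GRALLOC_USAGE_SW_READ_OFTEN,
            true /* reconnect */);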
diff --git a/media/libstagefright/TimeSource.cpp b/media/libstagefright/TimeSource.cpp
deleted file mode 100644
index 041980f..0000000
--- a/media/libstagefright/TimeSource.cpp
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stddef.h>
-#include <sys/time.h>
-
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/TimeSource.h>
-
-namespace android {
-
-SystemTimeSource::SystemTimeSource()
-    : mStartTimeUs(ALooper::GetNowUs()) {
-}
-
-int64_t SystemTimeSource::getRealTimeUs() {
-    return ALooper::GetNowUs() - mStartTimeUs;
-}
-
-}  // namespace android
-
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
deleted file mode 100644
index 7d15220..0000000
--- a/media/libstagefright/TimedEventQueue.cpp
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#undef __STRICT_ANSI__
-#define __STDINT_LIMITS
-#define __STDC_LIMIT_MACROS
-
-#include <inttypes.h>
-#include <stdint.h>
-#include <sys/prctl.h>
-#include <sys/time.h>
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "TimedEventQueue"
-#include <utils/Log.h>
-#include <utils/threads.h>
-
-#include "include/TimedEventQueue.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <binder/IServiceManager.h>
-#include <powermanager/PowerManager.h>
-#include <binder/IPCThreadState.h>
-#include <utils/CallStack.h>
-
-namespace android {
-
-static int64_t kWakelockMinDelay = 100000ll;  // 100ms
-
-TimedEventQueue::TimedEventQueue()
-    : mNextEventID(1),
-      mRunning(false),
-      mStopped(false),
-      mDeathRecipient(new PMDeathRecipient(this)),
-      mWakeLockCount(0) {
-}
-
-TimedEventQueue::~TimedEventQueue() {
-    stop();
-    if (mPowerManager != 0) {
-        sp<IBinder> binder = IInterface::asBinder(mPowerManager);
-        binder->unlinkToDeath(mDeathRecipient);
-    }
-}
-
-void TimedEventQueue::start() {
-    if (mRunning) {
-        return;
-    }
-
-    mStopped = false;
-
-    pthread_attr_t attr;
-    pthread_attr_init(&attr);
-    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
-
-    pthread_create(&mThread, &attr, ThreadWrapper, this);
-
-    pthread_attr_destroy(&attr);
-
-    mRunning = true;
-}
-
-void TimedEventQueue::stop(bool flush) {
-    if (!mRunning) {
-        return;
-    }
-
-    if (flush) {
-        postEventToBack(new StopEvent);
-    } else {
-        postTimedEvent(new StopEvent, INT64_MIN);
-    }
-
-    void *dummy;
-    pthread_join(mThread, &dummy);
-
-    // some events may be left in the queue if we did not flush and the wake lock
-    // must be released.
-    releaseWakeLock_l(true /*force*/);
-    mQueue.clear();
-
-    mRunning = false;
-}
-
-TimedEventQueue::event_id TimedEventQueue::postEvent(const sp<Event> &event) {
-    // Reserve an earlier timeslot at INT64_MIN to be able to post
-    // the StopEvent to the absolute head of the queue.
-    return postTimedEvent(event, INT64_MIN + 1);
-}
-
-TimedEventQueue::event_id TimedEventQueue::postEventToBack(
-        const sp<Event> &event) {
-    return postTimedEvent(event, INT64_MAX);
-}
-
-TimedEventQueue::event_id TimedEventQueue::postEventWithDelay(
-        const sp<Event> &event, int64_t delay_us) {
-    CHECK(delay_us >= 0);
-    return postTimedEvent(event, ALooper::GetNowUs() + delay_us);
-}
-
-TimedEventQueue::event_id TimedEventQueue::postTimedEvent(
-        const sp<Event> &event, int64_t realtime_us) {
-    Mutex::Autolock autoLock(mLock);
-
-    event->setEventID(mNextEventID++);
-
-    List<QueueItem>::iterator it = mQueue.begin();
-    while (it != mQueue.end() && realtime_us >= (*it).realtime_us) {
-        ++it;
-    }
-
-    QueueItem item;
-    item.event = event;
-    item.realtime_us = realtime_us;
-    item.has_wakelock = false;
-
-    if (it == mQueue.begin()) {
-        mQueueHeadChangedCondition.signal();
-    }
-
-    if (realtime_us > ALooper::GetNowUs() + kWakelockMinDelay) {
-        acquireWakeLock_l();
-        item.has_wakelock = true;
-    }
-    mQueue.insert(it, item);
-
-    mQueueNotEmptyCondition.signal();
-
-    return event->eventID();
-}
-
-static bool MatchesEventID(
-        void *cookie, const sp<TimedEventQueue::Event> &event) {
-    TimedEventQueue::event_id *id =
-        static_cast<TimedEventQueue::event_id *>(cookie);
-
-    if (event->eventID() != *id) {
-        return false;
-    }
-
-    *id = 0;
-
-    return true;
-}
-
-bool TimedEventQueue::cancelEvent(event_id id) {
-    if (id == 0) {
-        return false;
-    }
-
-    cancelEvents(&MatchesEventID, &id, true /* stopAfterFirstMatch */);
-
-    // if MatchesEventID found a match, it will have set id to 0
-    // (which is not a valid event_id).
-
-    return id == 0;
-}
-
-void TimedEventQueue::cancelEvents(
-        bool (*predicate)(void *cookie, const sp<Event> &event),
-        void *cookie,
-        bool stopAfterFirstMatch) {
-    Mutex::Autolock autoLock(mLock);
-
-    List<QueueItem>::iterator it = mQueue.begin();
-    while (it != mQueue.end()) {
-        if (!(*predicate)(cookie, (*it).event)) {
-            ++it;
-            continue;
-        }
-
-        if (it == mQueue.begin()) {
-            mQueueHeadChangedCondition.signal();
-        }
-
-        ALOGV("cancelling event %d", (*it).event->eventID());
-
-        (*it).event->setEventID(0);
-        if ((*it).has_wakelock) {
-            releaseWakeLock_l();
-        }
-        it = mQueue.erase(it);
-        if (stopAfterFirstMatch) {
-            return;
-        }
-    }
-}
-
-// static
-void *TimedEventQueue::ThreadWrapper(void *me) {
-
-    androidSetThreadPriority(0, ANDROID_PRIORITY_FOREGROUND);
-
-    static_cast<TimedEventQueue *>(me)->threadEntry();
-
-    return NULL;
-}
-
-void TimedEventQueue::threadEntry() {
-    prctl(PR_SET_NAME, (unsigned long)"TimedEventQueue", 0, 0, 0);
-
-    for (;;) {
-        int64_t now_us = 0;
-        sp<Event> event;
-        bool wakeLocked = false;
-
-        {
-            Mutex::Autolock autoLock(mLock);
-
-            if (mStopped) {
-                break;
-            }
-
-            while (mQueue.empty()) {
-                mQueueNotEmptyCondition.wait(mLock);
-            }
-
-            event_id eventID = 0;
-            for (;;) {
-                if (mQueue.empty()) {
-                    // The only event in the queue could have been cancelled
-                    // while we were waiting for its scheduled time.
-                    break;
-                }
-
-                List<QueueItem>::iterator it = mQueue.begin();
-                eventID = (*it).event->eventID();
-
-                now_us = ALooper::GetNowUs();
-                int64_t when_us = (*it).realtime_us;
-
-                int64_t delay_us;
-                if (when_us < 0 || when_us == INT64_MAX) {
-                    delay_us = 0;
-                } else {
-                    delay_us = when_us - now_us;
-                }
-
-                if (delay_us <= 0) {
-                    break;
-                }
-
-                static int64_t kMaxTimeoutUs = 10000000ll;  // 10 secs
-                bool timeoutCapped = false;
-                if (delay_us > kMaxTimeoutUs) {
-                    ALOGW("delay_us exceeds max timeout: %" PRId64 " us", delay_us);
-
-                    // We'll never block for more than 10 secs, instead
-                    // we will split up the full timeout into chunks of
-                    // 10 secs at a time. This will also avoid overflow
-                    // when converting from us to ns.
-                    delay_us = kMaxTimeoutUs;
-                    timeoutCapped = true;
-                }
-
-                status_t err = mQueueHeadChangedCondition.waitRelative(
-                        mLock, delay_us * 1000ll);
-
-                if (!timeoutCapped && err == -ETIMEDOUT) {
-                    // We finally hit the time this event is supposed to
-                    // trigger.
-                    now_us = ALooper::GetNowUs();
-                    break;
-                }
-            }
-
-            // The event w/ this id may have been cancelled while we're
-            // waiting for its trigger-time, in that case
-            // removeEventFromQueue_l will return NULL.
-            // Otherwise, the QueueItem will be removed
-            // from the queue and the referenced event returned.
-            event = removeEventFromQueue_l(eventID, &wakeLocked);
-        }
-
-        if (event != NULL) {
-            // Fire event with the lock NOT held.
-            event->fire(this, now_us);
-            if (wakeLocked) {
-                Mutex::Autolock autoLock(mLock);
-                releaseWakeLock_l();
-            }
-        }
-    }
-}
-
-sp<TimedEventQueue::Event> TimedEventQueue::removeEventFromQueue_l(
-        event_id id, bool *wakeLocked) {
-    for (List<QueueItem>::iterator it = mQueue.begin();
-         it != mQueue.end(); ++it) {
-        if ((*it).event->eventID() == id) {
-            sp<Event> event = (*it).event;
-            event->setEventID(0);
-            *wakeLocked = (*it).has_wakelock;
-            mQueue.erase(it);
-            return event;
-        }
-    }
-
-    ALOGW("Event %d was not found in the queue, already cancelled?", id);
-
-    return NULL;
-}
-
-void TimedEventQueue::acquireWakeLock_l()
-{
-    if (mWakeLockCount == 0) {
-        CHECK(mWakeLockToken == 0);
-        if (mPowerManager == 0) {
-            // use checkService() to avoid blocking if power service is not up yet
-            sp<IBinder> binder =
-                defaultServiceManager()->checkService(String16("power"));
-            if (binder == 0) {
-                ALOGW("cannot connect to the power manager service");
-            } else {
-                mPowerManager = interface_cast<IPowerManager>(binder);
-                binder->linkToDeath(mDeathRecipient);
-            }
-        }
-        if (mPowerManager != 0) {
-            sp<IBinder> binder = new BBinder();
-            int64_t token = IPCThreadState::self()->clearCallingIdentity();
-            status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
-                                                             binder,
-                                                             String16("TimedEventQueue"),
-                                                             String16("media"));    // not oneway
-            IPCThreadState::self()->restoreCallingIdentity(token);
-            if (status == NO_ERROR) {
-                mWakeLockToken = binder;
-                mWakeLockCount++;
-            }
-        }
-    } else {
-        mWakeLockCount++;
-    }
-}
-
-void TimedEventQueue::releaseWakeLock_l(bool force)
-{
-    if (mWakeLockCount == 0) {
-        return;
-    }
-    if (force) {
-        // Force wakelock release below by setting reference count to 1.
-        mWakeLockCount = 1;
-    }
-    if (--mWakeLockCount == 0) {
-        CHECK(mWakeLockToken != 0);
-        if (mPowerManager != 0) {
-            int64_t token = IPCThreadState::self()->clearCallingIdentity();
-            mPowerManager->releaseWakeLock(mWakeLockToken, 0);  // not oneway
-            IPCThreadState::self()->restoreCallingIdentity(token);
-        }
-        mWakeLockToken.clear();
-    }
-}
-
-void TimedEventQueue::clearPowerManager()
-{
-    Mutex::Autolock _l(mLock);
-    releaseWakeLock_l(true /*force*/);
-    mPowerManager.clear();
-}
-
-void TimedEventQueue::PMDeathRecipient::binderDied(
-        const wp<IBinder>& /* who */) {
-    mQueue->clearPowerManager();
-}
-
-}  // namespace android
-
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 0d9dc3a..8a0009c 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -18,14 +18,24 @@
 #define LOG_TAG "Utils"
 #include <utils/Log.h>
 #include <ctype.h>
+#include <stdio.h>
+#include <sys/stat.h>
+
+#include <utility>
+#include <vector>
 
 #include "include/ESDS.h"
+#include "include/HevcUtils.h"
 
 #include <arpa/inet.h>
 #include <cutils/properties.h>
 #include <media/openmax/OMX_Audio.h>
+#include <media/openmax/OMX_Video.h>
+#include <media/openmax/OMX_VideoExt.h>
+#include <media/stagefright/CodecBase.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALookup.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaDefs.h>
@@ -87,12 +97,515 @@
     return OK;
 }
 
+#if 0
+static void convertMetaDataToMessageInt32(
+        const sp<MetaData> &meta, sp<AMessage> &msg, uint32_t key, const char *name) {
+    int32_t value;
+    if (meta->findInt32(key, &value)) {
+        msg->setInt32(name, value);
+    }
+}
+#endif
+
+static void convertMetaDataToMessageColorAspects(const sp<MetaData> &meta, sp<AMessage> &msg) {
+    // 0 values are unspecified
+    int32_t range = 0;
+    int32_t primaries = 0;
+    int32_t transferFunction = 0;
+    int32_t colorMatrix = 0;
+    meta->findInt32(kKeyColorRange, &range);
+    meta->findInt32(kKeyColorPrimaries, &primaries);
+    meta->findInt32(kKeyTransferFunction, &transferFunction);
+    meta->findInt32(kKeyColorMatrix, &colorMatrix);
+    ColorAspects colorAspects;
+    memset(&colorAspects, 0, sizeof(colorAspects));
+    colorAspects.mRange = (ColorAspects::Range)range;
+    colorAspects.mPrimaries = (ColorAspects::Primaries)primaries;
+    colorAspects.mTransfer = (ColorAspects::Transfer)transferFunction;
+    colorAspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)colorMatrix;
+
+    int32_t rangeMsg, standardMsg, transferMsg;
+    if (CodecBase::convertCodecColorAspectsToPlatformAspects(
+            colorAspects, &rangeMsg, &standardMsg, &transferMsg) != OK) {
+        return;
+    }
+
+    // save specified values to msg
+    if (rangeMsg != 0) {
+        msg->setInt32("color-range", rangeMsg);
+    }
+    if (standardMsg != 0) {
+        msg->setInt32("color-standard", standardMsg);
+    }
+    if (transferMsg != 0) {
+        msg->setInt32("color-transfer", transferMsg);
+    }
+}
+
+static bool isHdr(const sp<AMessage> &format) {
+    // if CSD specifies HDR transfer(s), we assume HDR. Otherwise, if it specifies non-HDR
+    // transfers, we must assume non-HDR. This is because CSD trumps any color-transfer key
+    // in the format.
+    int32_t isHdr;
+    if (format->findInt32("android._is-hdr", &isHdr)) {
+        return isHdr;
+    }
+
+    // if user/container supplied HDR static info without transfer set, assume true
+    if (format->contains("hdr-static-info") && !format->contains("color-transfer")) {
+        return true;
+    }
+    // otherwise, verify that an HDR transfer function is set
+    int32_t transfer;
+    if (format->findInt32("color-transfer", &transfer)) {
+        return transfer == ColorUtils::kColorTransferST2084
+                || transfer == ColorUtils::kColorTransferHLG;
+    }
+    return false;
+}
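+// For example: a format carrying "hdr-static-info" but no "color-transfer" is
+// treated as HDR; an explicit "android._is-hdr" from the CSD wins outright;
+// otherwise only an ST2084 (PQ) or HLG transfer counts as HDR.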
+
+static void parseAacProfileFromCsd(const sp<ABuffer> &csd, sp<AMessage> &format) {
+    if (csd->size() < 2) {
+        return;
+    }
+
+    uint16_t audioObjectType = U16_AT((uint8_t*)csd->data());
+    if ((audioObjectType & 0xF800) == 0xF800) {
+        audioObjectType = 32 + ((audioObjectType >> 5) & 0x3F);
+    } else {
+        audioObjectType >>= 11;
+    }
+
+    const static ALookup<uint16_t, OMX_AUDIO_AACPROFILETYPE> profiles {
+        { 1,  OMX_AUDIO_AACObjectMain     },
+        { 2,  OMX_AUDIO_AACObjectLC       },
+        { 3,  OMX_AUDIO_AACObjectSSR      },
+        { 4,  OMX_AUDIO_AACObjectLTP      },
+        { 5,  OMX_AUDIO_AACObjectHE       },
+        { 6,  OMX_AUDIO_AACObjectScalable },
+        { 17, OMX_AUDIO_AACObjectERLC     },
+        { 23, OMX_AUDIO_AACObjectLD       },
+        { 29, OMX_AUDIO_AACObjectHE_PS    },
+        { 39, OMX_AUDIO_AACObjectELD      },
+    };
+
+    OMX_AUDIO_AACPROFILETYPE profile;
+    if (profiles.map(audioObjectType, &profile)) {
+        format->setInt32("profile", profile);
+    }
+}
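+// For example, a CSD beginning 0x11 0x90 yields audioObjectType
+// 0x1190 >> 11 = 2 (AAC-LC); 0xF8 0x50 takes the escape path (top five bits
+// all set) and decodes to 32 + ((0xF850 >> 5) & 0x3F) = 34, which has no
+// entry in the table above, so no profile is set.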
+
+static void parseAvcProfileLevelFromAvcc(const uint8_t *ptr, size_t size, sp<AMessage> &format) {
+    if (size < 4 || ptr[0] != 1) {  // configurationVersion == 1
+        return;
+    }
+    const uint8_t profile = ptr[1];
+    const uint8_t constraints = ptr[2];
+    const uint8_t level = ptr[3];
+
+    const static ALookup<uint8_t, OMX_VIDEO_AVCLEVELTYPE> levels {
+        {  9, OMX_VIDEO_AVCLevel1b }, // technically, 9 is only used for High+ profiles
+        { 10, OMX_VIDEO_AVCLevel1  },
+        { 11, OMX_VIDEO_AVCLevel11 }, // prefer level 1.1 for the value 11
+        { 11, OMX_VIDEO_AVCLevel1b },
+        { 12, OMX_VIDEO_AVCLevel12 },
+        { 13, OMX_VIDEO_AVCLevel13 },
+        { 20, OMX_VIDEO_AVCLevel2  },
+        { 21, OMX_VIDEO_AVCLevel21 },
+        { 22, OMX_VIDEO_AVCLevel22 },
+        { 30, OMX_VIDEO_AVCLevel3  },
+        { 31, OMX_VIDEO_AVCLevel31 },
+        { 32, OMX_VIDEO_AVCLevel32 },
+        { 40, OMX_VIDEO_AVCLevel4  },
+        { 41, OMX_VIDEO_AVCLevel41 },
+        { 42, OMX_VIDEO_AVCLevel42 },
+        { 50, OMX_VIDEO_AVCLevel5  },
+        { 51, OMX_VIDEO_AVCLevel51 },
+        { 52, OMX_VIDEO_AVCLevel52 },
+    };
+    const static ALookup<uint8_t, OMX_VIDEO_AVCPROFILETYPE> profiles {
+        { 66, OMX_VIDEO_AVCProfileBaseline },
+        { 77, OMX_VIDEO_AVCProfileMain     },
+        { 88, OMX_VIDEO_AVCProfileExtended },
+        { 100, OMX_VIDEO_AVCProfileHigh    },
+        { 110, OMX_VIDEO_AVCProfileHigh10  },
+        { 122, OMX_VIDEO_AVCProfileHigh422 },
+        { 244, OMX_VIDEO_AVCProfileHigh444 },
+    };
+
+    // set profile & level if they are recognized
+    OMX_VIDEO_AVCPROFILETYPE codecProfile;
+    OMX_VIDEO_AVCLEVELTYPE codecLevel;
+    if (profiles.map(profile, &codecProfile)) {
+        format->setInt32("profile", codecProfile);
+        if (levels.map(level, &codecLevel)) {
+            // for 9 && 11 decide level based on profile and constraint_set3 flag
+            if (level == 11 && (profile == 66 || profile == 77 || profile == 88)) {
+                codecLevel = (constraints & 0x10) ? OMX_VIDEO_AVCLevel1b : OMX_VIDEO_AVCLevel11;
+            }
+            format->setInt32("level", codecLevel);
+        }
+    }
+}
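+// For example, an avcC beginning 0x01 0x64 0x00 0x29 declares profile_idc 100
+// and level_idc 41, i.e. OMX_VIDEO_AVCProfileHigh at OMX_VIDEO_AVCLevel41; for
+// Baseline/Main/Extended (66/77/88), level_idc 11 with constraint_set3 (0x10)
+// set is reinterpreted as Level 1b.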
+
+static void parseH263ProfileLevelFromD263(const uint8_t *ptr, size_t size, sp<AMessage> &format) {
+    if (size < 7) {
+        return;
+    }
+
+    const uint8_t profile = ptr[6];
+    const uint8_t level = ptr[5];
+
+    const static ALookup<uint8_t, OMX_VIDEO_H263PROFILETYPE> profiles {
+        { 0, OMX_VIDEO_H263ProfileBaseline },
+        { 1, OMX_VIDEO_H263ProfileH320Coding },
+        { 2, OMX_VIDEO_H263ProfileBackwardCompatible },
+        { 3, OMX_VIDEO_H263ProfileISWV2 },
+        { 4, OMX_VIDEO_H263ProfileISWV3 },
+        { 5, OMX_VIDEO_H263ProfileHighCompression },
+        { 6, OMX_VIDEO_H263ProfileInternet },
+        { 7, OMX_VIDEO_H263ProfileInterlace },
+        { 8, OMX_VIDEO_H263ProfileHighLatency },
+    };
+
+    const static ALookup<uint8_t, OMX_VIDEO_H263LEVELTYPE> levels {
+        { 10, OMX_VIDEO_H263Level10 },
+        { 20, OMX_VIDEO_H263Level20 },
+        { 30, OMX_VIDEO_H263Level30 },
+        { 40, OMX_VIDEO_H263Level40 },
+        { 45, OMX_VIDEO_H263Level45 },
+        { 50, OMX_VIDEO_H263Level50 },
+        { 60, OMX_VIDEO_H263Level60 },
+        { 70, OMX_VIDEO_H263Level70 },
+    };
+
+    // set profile & level if they are recognized
+    OMX_VIDEO_H263PROFILETYPE codecProfile;
+    OMX_VIDEO_H263LEVELTYPE codecLevel;
+    if (profiles.map(profile, &codecProfile)) {
+        format->setInt32("profile", codecProfile);
+        if (levels.map(level, &codecLevel)) {
+            format->setInt32("level", codecLevel);
+        }
+    }
+}
+
+static void parseHevcProfileLevelFromHvcc(const uint8_t *ptr, size_t size, sp<AMessage> &format) {
+    if (size < 13 || ptr[0] != 1) {  // configurationVersion == 1
+        return;
+    }
+
+    const uint8_t profile = ptr[1] & 0x1F;
+    const uint8_t tier = (ptr[1] & 0x20) >> 5;
+    const uint8_t level = ptr[12];
+
+    const static ALookup<std::pair<uint8_t, uint8_t>, OMX_VIDEO_HEVCLEVELTYPE> levels {
+        { { 0, 30  }, OMX_VIDEO_HEVCMainTierLevel1  },
+        { { 0, 60  }, OMX_VIDEO_HEVCMainTierLevel2  },
+        { { 0, 63  }, OMX_VIDEO_HEVCMainTierLevel21 },
+        { { 0, 90  }, OMX_VIDEO_HEVCMainTierLevel3  },
+        { { 0, 93  }, OMX_VIDEO_HEVCMainTierLevel31 },
+        { { 0, 120 }, OMX_VIDEO_HEVCMainTierLevel4  },
+        { { 0, 123 }, OMX_VIDEO_HEVCMainTierLevel41 },
+        { { 0, 150 }, OMX_VIDEO_HEVCMainTierLevel5  },
+        { { 0, 153 }, OMX_VIDEO_HEVCMainTierLevel51 },
+        { { 0, 156 }, OMX_VIDEO_HEVCMainTierLevel52 },
+        { { 0, 180 }, OMX_VIDEO_HEVCMainTierLevel6  },
+        { { 0, 183 }, OMX_VIDEO_HEVCMainTierLevel61 },
+        { { 0, 186 }, OMX_VIDEO_HEVCMainTierLevel62 },
+        { { 1, 30  }, OMX_VIDEO_HEVCHighTierLevel1  },
+        { { 1, 60  }, OMX_VIDEO_HEVCHighTierLevel2  },
+        { { 1, 63  }, OMX_VIDEO_HEVCHighTierLevel21 },
+        { { 1, 90  }, OMX_VIDEO_HEVCHighTierLevel3  },
+        { { 1, 93  }, OMX_VIDEO_HEVCHighTierLevel31 },
+        { { 1, 120 }, OMX_VIDEO_HEVCHighTierLevel4  },
+        { { 1, 123 }, OMX_VIDEO_HEVCHighTierLevel41 },
+        { { 1, 150 }, OMX_VIDEO_HEVCHighTierLevel5  },
+        { { 1, 153 }, OMX_VIDEO_HEVCHighTierLevel51 },
+        { { 1, 156 }, OMX_VIDEO_HEVCHighTierLevel52 },
+        { { 1, 180 }, OMX_VIDEO_HEVCHighTierLevel6  },
+        { { 1, 183 }, OMX_VIDEO_HEVCHighTierLevel61 },
+        { { 1, 186 }, OMX_VIDEO_HEVCHighTierLevel62 },
+    };
+
+    const static ALookup<uint8_t, OMX_VIDEO_HEVCPROFILETYPE> profiles {
+        { 1, OMX_VIDEO_HEVCProfileMain   },
+        { 2, OMX_VIDEO_HEVCProfileMain10 },
+    };
+
+    // set profile & level if they are recognized
+    OMX_VIDEO_HEVCPROFILETYPE codecProfile;
+    OMX_VIDEO_HEVCLEVELTYPE codecLevel;
+    if (!profiles.map(profile, &codecProfile)) {
+        if (ptr[2] & 0x40 /* general compatibility flag 1 */) {
+            codecProfile = OMX_VIDEO_HEVCProfileMain;
+        } else if (ptr[2] & 0x20 /* general compatibility flag 2 */) {
+            codecProfile = OMX_VIDEO_HEVCProfileMain10;
+        } else {
+            return;
+        }
+    }
+
+    // bump to HDR profile
+    if (isHdr(format) && codecProfile == OMX_VIDEO_HEVCProfileMain10) {
+        codecProfile = OMX_VIDEO_HEVCProfileMain10HDR10;
+    }
+
+    format->setInt32("profile", codecProfile);
+    if (levels.map(std::make_pair(tier, level), &codecLevel)) {
+        format->setInt32("level", codecLevel);
+    }
+}
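+// For example, ptr[1] = 0x02 (Main tier, general_profile_idc 2) with
+// ptr[12] = 120 maps to OMX_VIDEO_HEVCProfileMain10 at
+// OMX_VIDEO_HEVCMainTierLevel4, bumped to Main10HDR10 when isHdr() holds.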
+
+static void parseMpeg2ProfileLevelFromHeader(
+        const uint8_t *data, size_t size, sp<AMessage> &format) {
+    // find sequence extension
+    const uint8_t *seq = (const uint8_t*)memmem(data, size, "\x00\x00\x01\xB5", 4);
+    if (seq != NULL && seq + 5 < data + size) {
+        const uint8_t start_code = seq[4] >> 4;
+        if (start_code != 1 /* sequence extension ID */) {
+            return;
+        }
+        const uint8_t indication = ((seq[4] & 0xF) << 4) | ((seq[5] & 0xF0) >> 4);
+
+        const static ALookup<uint8_t, OMX_VIDEO_MPEG2PROFILETYPE> profiles {
+            { 0x50, OMX_VIDEO_MPEG2ProfileSimple  },
+            { 0x40, OMX_VIDEO_MPEG2ProfileMain    },
+            { 0x30, OMX_VIDEO_MPEG2ProfileSNR     },
+            { 0x20, OMX_VIDEO_MPEG2ProfileSpatial },
+            { 0x10, OMX_VIDEO_MPEG2ProfileHigh    },
+        };
+
+        const static ALookup<uint8_t, OMX_VIDEO_MPEG2LEVELTYPE> levels {
+            { 0x0A, OMX_VIDEO_MPEG2LevelLL  },
+            { 0x08, OMX_VIDEO_MPEG2LevelML  },
+            { 0x06, OMX_VIDEO_MPEG2LevelH14 },
+            { 0x04, OMX_VIDEO_MPEG2LevelHL  },
+            { 0x02, OMX_VIDEO_MPEG2LevelHP  },
+        };
+
+        const static ALookup<uint8_t,
+                std::pair<OMX_VIDEO_MPEG2PROFILETYPE, OMX_VIDEO_MPEG2LEVELTYPE>> escapes {
+            /* unsupported
+            { 0x8E, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelLL  } },
+            { 0x8D, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelML  } },
+            { 0x8B, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelH14 } },
+            { 0x8A, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelHL  } }, */
+            { 0x85, { OMX_VIDEO_MPEG2Profile422, OMX_VIDEO_MPEG2LevelML  } },
+            { 0x82, { OMX_VIDEO_MPEG2Profile422, OMX_VIDEO_MPEG2LevelHL  } },
+        };
+
+        OMX_VIDEO_MPEG2PROFILETYPE profile;
+        OMX_VIDEO_MPEG2LEVELTYPE level;
+        std::pair<OMX_VIDEO_MPEG2PROFILETYPE, OMX_VIDEO_MPEG2LEVELTYPE> profileLevel;
+        if (escapes.map(indication, &profileLevel)) {
+            format->setInt32("profile", profileLevel.first);
+            format->setInt32("level", profileLevel.second);
+        } else if (profiles.map(indication & 0x70, &profile)) {
+            format->setInt32("profile", profile);
+            if (levels.map(indication & 0xF, &level)) {
+                format->setInt32("level", level);
+            }
+        }
+    }
+}
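+// For example, indication 0x48 splits into profile bits 0x40 (Main) and level
+// bits 0x8 (Main Level); the escape value 0x85 maps directly to 4:2:2 @ ML.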
+
+static void parseMpeg2ProfileLevelFromEsds(ESDS &esds, sp<AMessage> &format) {
+    // esds seems to only contain the profile for MPEG-2
+    uint8_t objType;
+    if (esds.getObjectTypeIndication(&objType) == OK) {
+        const static ALookup<uint8_t, OMX_VIDEO_MPEG2PROFILETYPE> profiles{
+            { 0x60, OMX_VIDEO_MPEG2ProfileSimple  },
+            { 0x61, OMX_VIDEO_MPEG2ProfileMain    },
+            { 0x62, OMX_VIDEO_MPEG2ProfileSNR     },
+            { 0x63, OMX_VIDEO_MPEG2ProfileSpatial },
+            { 0x64, OMX_VIDEO_MPEG2ProfileHigh    },
+            { 0x65, OMX_VIDEO_MPEG2Profile422     },
+        };
+
+        OMX_VIDEO_MPEG2PROFILETYPE profile;
+        if (profiles.map(objType, &profile)) {
+            format->setInt32("profile", profile);
+        }
+    }
+}
+
+static void parseMpeg4ProfileLevelFromCsd(const sp<ABuffer> &csd, sp<AMessage> &format) {
+    const uint8_t *data = csd->data();
+    // find visual object sequence
+    const uint8_t *seq = (const uint8_t*)memmem(data, csd->size(), "\x00\x00\x01\xB0", 4);
+    if (seq != NULL && seq + 4 < data + csd->size()) {
+        const uint8_t indication = seq[4];
+
+        const static ALookup<uint8_t,
+                std::pair<OMX_VIDEO_MPEG4PROFILETYPE, OMX_VIDEO_MPEG4LEVELTYPE>> table {
+            { 0b00000001, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level1  } },
+            { 0b00000010, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level2  } },
+            { 0b00000011, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level3  } },
+            { 0b00000100, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level4a } },
+            { 0b00000101, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level5  } },
+            { 0b00000110, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level6  } },
+            { 0b00001000, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level0  } },
+            { 0b00001001, { OMX_VIDEO_MPEG4ProfileSimple,            OMX_VIDEO_MPEG4Level0b } },
+            { 0b00010000, { OMX_VIDEO_MPEG4ProfileSimpleScalable,    OMX_VIDEO_MPEG4Level0  } },
+            { 0b00010001, { OMX_VIDEO_MPEG4ProfileSimpleScalable,    OMX_VIDEO_MPEG4Level1  } },
+            { 0b00010010, { OMX_VIDEO_MPEG4ProfileSimpleScalable,    OMX_VIDEO_MPEG4Level2  } },
+            /* unsupported
+            { 0b00011101, { XXX_MPEG4ProfileSimpleScalableER,        OMX_VIDEO_MPEG4Level0  } },
+            { 0b00011110, { XXX_MPEG4ProfileSimpleScalableER,        OMX_VIDEO_MPEG4Level1  } },
+            { 0b00011111, { XXX_MPEG4ProfileSimpleScalableER,        OMX_VIDEO_MPEG4Level2  } }, */
+            { 0b00100001, { OMX_VIDEO_MPEG4ProfileCore,              OMX_VIDEO_MPEG4Level1  } },
+            { 0b00100010, { OMX_VIDEO_MPEG4ProfileCore,              OMX_VIDEO_MPEG4Level2  } },
+            { 0b00110010, { OMX_VIDEO_MPEG4ProfileMain,              OMX_VIDEO_MPEG4Level2  } },
+            { 0b00110011, { OMX_VIDEO_MPEG4ProfileMain,              OMX_VIDEO_MPEG4Level3  } },
+            { 0b00110100, { OMX_VIDEO_MPEG4ProfileMain,              OMX_VIDEO_MPEG4Level4  } },
+            /* deprecated
+            { 0b01000010, { OMX_VIDEO_MPEG4ProfileNbit,              OMX_VIDEO_MPEG4Level2  } }, */
+            { 0b01010001, { OMX_VIDEO_MPEG4ProfileScalableTexture,   OMX_VIDEO_MPEG4Level1  } },
+            { 0b01100001, { OMX_VIDEO_MPEG4ProfileSimpleFace,        OMX_VIDEO_MPEG4Level1  } },
+            { 0b01100010, { OMX_VIDEO_MPEG4ProfileSimpleFace,        OMX_VIDEO_MPEG4Level2  } },
+            { 0b01100011, { OMX_VIDEO_MPEG4ProfileSimpleFBA,         OMX_VIDEO_MPEG4Level1  } },
+            { 0b01100100, { OMX_VIDEO_MPEG4ProfileSimpleFBA,         OMX_VIDEO_MPEG4Level2  } },
+            { 0b01110001, { OMX_VIDEO_MPEG4ProfileBasicAnimated,     OMX_VIDEO_MPEG4Level1  } },
+            { 0b01110010, { OMX_VIDEO_MPEG4ProfileBasicAnimated,     OMX_VIDEO_MPEG4Level2  } },
+            { 0b10000001, { OMX_VIDEO_MPEG4ProfileHybrid,            OMX_VIDEO_MPEG4Level1  } },
+            { 0b10000010, { OMX_VIDEO_MPEG4ProfileHybrid,            OMX_VIDEO_MPEG4Level2  } },
+            { 0b10010001, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime,  OMX_VIDEO_MPEG4Level1  } },
+            { 0b10010010, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime,  OMX_VIDEO_MPEG4Level2  } },
+            { 0b10010011, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime,  OMX_VIDEO_MPEG4Level3  } },
+            { 0b10010100, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime,  OMX_VIDEO_MPEG4Level4  } },
+            { 0b10100001, { OMX_VIDEO_MPEG4ProfileCoreScalable,      OMX_VIDEO_MPEG4Level1  } },
+            { 0b10100010, { OMX_VIDEO_MPEG4ProfileCoreScalable,      OMX_VIDEO_MPEG4Level2  } },
+            { 0b10100011, { OMX_VIDEO_MPEG4ProfileCoreScalable,      OMX_VIDEO_MPEG4Level3  } },
+            { 0b10110001, { OMX_VIDEO_MPEG4ProfileAdvancedCoding,    OMX_VIDEO_MPEG4Level1  } },
+            { 0b10110010, { OMX_VIDEO_MPEG4ProfileAdvancedCoding,    OMX_VIDEO_MPEG4Level2  } },
+            { 0b10110011, { OMX_VIDEO_MPEG4ProfileAdvancedCoding,    OMX_VIDEO_MPEG4Level3  } },
+            { 0b10110100, { OMX_VIDEO_MPEG4ProfileAdvancedCoding,    OMX_VIDEO_MPEG4Level4  } },
+            { 0b11000001, { OMX_VIDEO_MPEG4ProfileAdvancedCore,      OMX_VIDEO_MPEG4Level1  } },
+            { 0b11000010, { OMX_VIDEO_MPEG4ProfileAdvancedCore,      OMX_VIDEO_MPEG4Level2  } },
+            { 0b11010001, { OMX_VIDEO_MPEG4ProfileAdvancedScalable,  OMX_VIDEO_MPEG4Level1  } },
+            { 0b11010010, { OMX_VIDEO_MPEG4ProfileAdvancedScalable,  OMX_VIDEO_MPEG4Level2  } },
+            { 0b11010011, { OMX_VIDEO_MPEG4ProfileAdvancedScalable,  OMX_VIDEO_MPEG4Level3  } },
+            /* unsupported
+            { 0b11100001, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level1  } },
+            { 0b11100010, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level2  } },
+            { 0b11100011, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level3  } },
+            { 0b11100100, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level4  } },
+            { 0b11100101, { XXX_MPEG4ProfileCoreStudio,              OMX_VIDEO_MPEG4Level1  } },
+            { 0b11100110, { XXX_MPEG4ProfileCoreStudio,              OMX_VIDEO_MPEG4Level2  } },
+            { 0b11100111, { XXX_MPEG4ProfileCoreStudio,              OMX_VIDEO_MPEG4Level3  } },
+            { 0b11101000, { XXX_MPEG4ProfileCoreStudio,              OMX_VIDEO_MPEG4Level4  } },
+            { 0b11101011, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level5  } },
+            { 0b11101100, { XXX_MPEG4ProfileSimpleStudio,            OMX_VIDEO_MPEG4Level6  } }, */
+            { 0b11110000, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level0  } },
+            { 0b11110001, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level1  } },
+            { 0b11110010, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level2  } },
+            { 0b11110011, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level3  } },
+            { 0b11110100, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level4  } },
+            { 0b11110101, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level5  } },
+            { 0b11110111, { OMX_VIDEO_MPEG4ProfileAdvancedSimple,    OMX_VIDEO_MPEG4Level3b } },
+            /* deprecated
+            { 0b11111000, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level0  } },
+            { 0b11111001, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level1  } },
+            { 0b11111010, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level2  } },
+            { 0b11111011, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level3  } },
+            { 0b11111100, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level4  } },
+            { 0b11111101, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level5  } }, */
+        };
+
+        std::pair<OMX_VIDEO_MPEG4PROFILETYPE, OMX_VIDEO_MPEG4LEVELTYPE> profileLevel;
+        if (table.map(indication, &profileLevel)) {
+            format->setInt32("profile", profileLevel.first);
+            format->setInt32("level", profileLevel.second);
+        }
+    }
+}
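+// For example, indication 0b00000011 maps to Simple @ Level 3 and 0b11110011
+// to AdvancedSimple @ Level 3; values in the commented-out studio/FGS ranges
+// leave profile and level unset.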
+
+static void parseVp9ProfileLevelFromCsd(const sp<ABuffer> &csd, sp<AMessage> &format) {
+    const uint8_t *data = csd->data();
+    size_t remaining = csd->size();
+
+    while (remaining >= 2) {
+        const uint8_t id = data[0];
+        const uint8_t length = data[1];
+        remaining -= 2;
+        data += 2;
+        if (length > remaining) {
+            break;
+        }
+        switch (id) {
+            case 1 /* profileId */:
+                if (length >= 1) {
+                    const static ALookup<uint8_t, OMX_VIDEO_VP9PROFILETYPE> profiles {
+                        { 0, OMX_VIDEO_VP9Profile0 },
+                        { 1, OMX_VIDEO_VP9Profile1 },
+                        { 2, OMX_VIDEO_VP9Profile2 },
+                        { 3, OMX_VIDEO_VP9Profile3 },
+                    };
+
+                    const static ALookup<OMX_VIDEO_VP9PROFILETYPE, OMX_VIDEO_VP9PROFILETYPE> toHdr {
+                        { OMX_VIDEO_VP9Profile2, OMX_VIDEO_VP9Profile2HDR },
+                        { OMX_VIDEO_VP9Profile3, OMX_VIDEO_VP9Profile3HDR },
+                    };
+
+                    OMX_VIDEO_VP9PROFILETYPE profile;
+                    if (profiles.map(data[0], &profile)) {
+                        // convert to HDR profile
+                        if (isHdr(format)) {
+                            toHdr.lookup(profile, &profile);
+                        }
+
+                        format->setInt32("profile", profile);
+                    }
+                }
+                break;
+            case 2 /* levelId */:
+                if (length >= 1) {
+                    const static ALookup<uint8_t, OMX_VIDEO_VP9LEVELTYPE> levels {
+                        { 10, OMX_VIDEO_VP9Level1  },
+                        { 11, OMX_VIDEO_VP9Level11 },
+                        { 20, OMX_VIDEO_VP9Level2  },
+                        { 21, OMX_VIDEO_VP9Level21 },
+                        { 30, OMX_VIDEO_VP9Level3  },
+                        { 31, OMX_VIDEO_VP9Level31 },
+                        { 40, OMX_VIDEO_VP9Level4  },
+                        { 41, OMX_VIDEO_VP9Level41 },
+                        { 50, OMX_VIDEO_VP9Level5  },
+                        { 51, OMX_VIDEO_VP9Level51 },
+                        { 52, OMX_VIDEO_VP9Level52 },
+                        { 60, OMX_VIDEO_VP9Level6  },
+                        { 61, OMX_VIDEO_VP9Level61 },
+                        { 62, OMX_VIDEO_VP9Level62 },
+                    };
+
+                    OMX_VIDEO_VP9LEVELTYPE level;
+                    if (levels.map(data[0], &level)) {
+                        format->setInt32("level", level);
+                    }
+                }
+                break;
+            default:
+                break;
+        }
+        remaining -= length;
+        data += length;
+    }
+}
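+// For example, the codec-private bytes 01 01 02 02 01 28 (hex) carry
+// profileId 2 and levelId 40, i.e. OMX_VIDEO_VP9Profile2 (promoted to
+// Profile2HDR when isHdr() holds) at OMX_VIDEO_VP9Level4.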
+
 status_t convertMetaDataToMessage(
         const sp<MetaData> &meta, sp<AMessage> *format) {
+
     format->clear();
 
+    if (meta == NULL) {
+        ALOGE("convertMetaDataToMessage: NULL input");
+        return BAD_VALUE;
+    }
+
     const char *mime;
-    CHECK(meta->findCString(kKeyMIMEType, &mime));
+    if (!meta->findCString(kKeyMIMEType, &mime)) {
+        return BAD_VALUE;
+    }
 
     sp<AMessage> msg = new AMessage;
     msg->setString("mime", mime);
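
convertMetaDataToMessage() now validates its inputs instead of CHECK-aborting: a NULL MetaData, or a missing mandatory key (mime here; width/height and channel-count/sample-rate in the hunks below), returns BAD_VALUE. A hedged caller sketch, where 'track' is an assumed IMediaSource:

    sp<MetaData> trackMeta = track->getFormat();
    sp<AMessage> format;
    if (convertMetaDataToMessage(trackMeta, &format) != OK) {
        // malformed or incomplete track metadata: skip the track rather
        // than bringing down the media server
    }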
@@ -102,9 +615,15 @@
         msg->setInt64("durationUs", durationUs);
     }
 
-    int avgBitRate;
-    if (meta->findInt32(kKeyBitRate, &avgBitRate)) {
-        msg->setInt32("bit-rate", avgBitRate);
+    int32_t avgBitRate = 0;
+    if (meta->findInt32(kKeyBitRate, &avgBitRate) && avgBitRate > 0) {
+        msg->setInt32("bitrate", avgBitRate);
+    }
+
+    int32_t maxBitRate;
+    if (meta->findInt32(kKeyMaxBitRate, &maxBitRate)
+            && maxBitRate > 0 && maxBitRate >= avgBitRate) {
+        msg->setInt32("max-bitrate", maxBitRate);
     }
 
     int32_t isSync;
@@ -112,10 +631,18 @@
         msg->setInt32("is-sync-frame", 1);
     }
 
+    // this key is set by extractors, so it only needs translating from meta to message
+    int32_t trackID;
+    if (meta->findInt32(kKeyTrackID, &trackID)) {
+        msg->setInt32("track-id", trackID);
+    }
+
     if (!strncasecmp("video/", mime, 6)) {
         int32_t width, height;
-        CHECK(meta->findInt32(kKeyWidth, &width));
-        CHECK(meta->findInt32(kKeyHeight, &height));
+        if (!meta->findInt32(kKeyWidth, &width)
+                || !meta->findInt32(kKeyHeight, &height)) {
+            return BAD_VALUE;
+        }
 
         msg->setInt32("width", width);
         msg->setInt32("height", height);
@@ -145,10 +672,22 @@
         if (meta->findInt32(kKeyRotation, &rotationDegrees)) {
             msg->setInt32("rotation-degrees", rotationDegrees);
         }
+
+        uint32_t type;
+        const void *data;
+        size_t size;
+        if (meta->findData(kKeyHdrStaticInfo, &type, &data, &size)
+                && type == 'hdrS' && size == sizeof(HDRStaticInfo)) {
+            ColorUtils::setHDRStaticInfoIntoFormat(*(HDRStaticInfo*)data, msg);
+        }
+
+        convertMetaDataToMessageColorAspects(meta, msg);
     } else if (!strncasecmp("audio/", mime, 6)) {
         int32_t numChannels, sampleRate;
-        CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
-        CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+        if (!meta->findInt32(kKeyChannelCount, &numChannels)
+                || !meta->findInt32(kKeySampleRate, &sampleRate)) {
+            return BAD_VALUE;
+        }
 
         msg->setInt32("channel-count", numChannels);
         msg->setInt32("sample-rate", sampleRate);
@@ -169,13 +708,18 @@
 
         int32_t isADTS;
         if (meta->findInt32(kKeyIsADTS, &isADTS)) {
-            msg->setInt32("is-adts", true);
+            msg->setInt32("is-adts", isADTS);
         }
 
         int32_t aacProfile = -1;
         if (meta->findInt32(kKeyAACAOT, &aacProfile)) {
             msg->setInt32("aac-profile", aacProfile);
         }
+
+        int32_t pcmEncoding;
+        if (meta->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
+            msg->setInt32("pcm-encoding", pcmEncoding);
+        }
     }
 
     int32_t maxInputSize;
@@ -215,8 +759,8 @@
             ALOGE("b/23680780");
             return BAD_VALUE;
         }
-        uint8_t profile __unused = ptr[1];
-        uint8_t level __unused = ptr[3];
+
+        parseAvcProfileLevelFromAvcc(ptr, size, msg);
 
        // There is decodable content out there that fails the following
        // assertion; let's be lenient for now...
@@ -312,12 +856,11 @@
             ALOGE("b/23680780");
             return BAD_VALUE;
         }
-        uint8_t profile __unused = ptr[1] & 31;
-        uint8_t level __unused = ptr[12];
+
+        const size_t dataSize = size; // save for later
         ptr += 22;
         size -= 22;
 
-
         size_t numofArrays = (char)ptr[0];
         ptr += 1;
         size -= 1;
@@ -329,6 +872,8 @@
         }
         buffer->setRange(0, 0);
 
+        HevcParameterSets hvcc;
+
         for (i = 0; i < numofArrays; i++) {
             if (size < 3) {
                 ALOGE("b/23680780");
@@ -360,6 +905,7 @@
                 if (err != OK) {
                     return err;
                 }
+                (void)hvcc.addNalUnit(ptr, length);
 
                 ptr += length;
                 size -= length;
@@ -369,9 +915,19 @@
         buffer->meta()->setInt64("timeUs", 0);
         msg->setBuffer("csd-0", buffer);
 
+        // If we saw VUI color information, we know whether this is HDR, because for HEVC
+        // the VUI trumps other format parameters.
+        HevcParameterSets::Info info = hvcc.getInfo();
+        if (info & hvcc.kInfoHasColorDescription) {
+            msg->setInt32("android._is-hdr", (info & hvcc.kInfoIsHdr) != 0);
+        }
+
+        parseHevcProfileLevelFromHvcc((const uint8_t *)data, dataSize, msg);
     } else if (meta->findData(kKeyESDS, &type, &data, &size)) {
         ESDS esds((const char *)data, size);
-        CHECK_EQ(esds.InitCheck(), (status_t)OK);
+        if (esds.InitCheck() != (status_t)OK) {
+            return BAD_VALUE;
+        }
 
         const void *codec_specific_data;
         size_t codec_specific_data_size;
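
In the HEVC branch above, every parameter-set NAL is also fed into a HevcParameterSets accumulator so VUI-derived facts can be read back once parsing is done. A hedged sketch of that flow in isolation (HevcParameterSets and its kInfo* flags are the AOSP helpers used above; parameterSetNals is a hypothetical container of raw NAL payloads without start codes):

```cpp
// Sketch: decide whether an HEVC stream is HDR from its VPS/SPS/PPS NAL units.
HevcParameterSets hvcc;
for (const auto &nal : parameterSetNals) {      // hypothetical input container
    (void)hvcc.addNalUnit(nal.data, nal.size);  // parse failures are tolerated
}
HevcParameterSets::Info info = hvcc.getInfo();
if (info & HevcParameterSets::kInfoHasColorDescription) {
    // The VUI carried an explicit color description, so the verdict is reliable.
    msg->setInt32("android._is-hdr", (info & HevcParameterSets::kInfoIsHdr) != 0);
}
```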
@@ -389,6 +945,34 @@
         buffer->meta()->setInt32("csd", true);
         buffer->meta()->setInt64("timeUs", 0);
         msg->setBuffer("csd-0", buffer);
+
+        if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)) {
+            parseMpeg4ProfileLevelFromCsd(buffer, msg);
+        } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)) {
+            parseMpeg2ProfileLevelFromEsds(esds, msg);
+            if (meta->findData(kKeyStreamHeader, &type, &data, &size)) {
+                parseMpeg2ProfileLevelFromHeader((uint8_t*)data, size, msg);
+            }
+        } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
+            parseAacProfileFromCsd(buffer, msg);
+        }
+
+        uint32_t maxBitrate, avgBitrate;
+        if (esds.getBitRate(&maxBitrate, &avgBitrate) == OK) {
+            if (!meta->hasData(kKeyBitRate)
+                    && avgBitrate > 0 && avgBitrate <= INT32_MAX) {
+                msg->setInt32("bitrate", (int32_t)avgBitrate);
+            } else {
+                (void)msg->findInt32("bitrate", (int32_t*)&avgBitrate);
+            }
+            if (!meta->hasData(kKeyMaxBitRate)
+                    && maxBitrate > 0 && maxBitrate <= INT32_MAX && maxBitrate >= avgBitrate) {
+                msg->setInt32("max-bitrate", (int32_t)maxBitrate);
+            }
+        }
+    } else if (meta->findData(kTypeD263, &type, &data, &size)) {
+        const uint8_t *ptr = (const uint8_t *)data;
+        parseH263ProfileLevelFromD263(ptr, size, msg);
     } else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
         sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
         if (buffer.get() == NULL || buffer->base() == NULL) {
@@ -451,6 +1035,25 @@
         buffer->meta()->setInt32("csd", true);
         buffer->meta()->setInt64("timeUs", 0);
         msg->setBuffer("csd-2", buffer);
+    } else if (meta->findData(kKeyVp9CodecPrivate, &type, &data, &size)) {
+        sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+        if (buffer.get() == NULL || buffer->base() == NULL) {
+            return NO_MEMORY;
+        }
+        memcpy(buffer->data(), data, size);
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-0", buffer);
+
+        parseVp9ProfileLevelFromCsd(buffer, msg);
+    }
+
+    // TODO expose "crypto-key"/kKeyCryptoKey through public api
+    if (meta->findData(kKeyCryptoKey, &type, &data, &size)) {
+        sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+        if (buffer.get() == NULL || buffer->base() == NULL) {
+            return NO_MEMORY;
+        }
+        memcpy(buffer->data(), data, size);
+        msg->setBuffer("crypto-key", buffer);
+    }
 
     *format = msg;
@@ -458,12 +1061,20 @@
     return OK;
 }
 
-static size_t reassembleAVCC(const sp<ABuffer> &csd0, const sp<ABuffer> csd1, char *avcc) {
+const uint8_t *findNextNalStartCode(const uint8_t *data, size_t length) {
+    uint8_t *res = NULL;
+    if (length > 4) {
+        // minus 1 so as not to match a NAL start code at the very end
+        res = (uint8_t *)memmem(data, length - 1, "\x00\x00\x00\x01", 4);
+    }
+    return res != NULL && res < data + length - 4 ? res : &data[length];
+}
 
+static size_t reassembleAVCC(const sp<ABuffer> &csd0, const sp<ABuffer> csd1, char *avcc) {
     avcc[0] = 1;        // version
-    avcc[1] = 0x64;     // profile
-    avcc[2] = 0;        // unused (?)
-    avcc[3] = 0xd;      // level
+    avcc[1] = 0x64;     // profile (default to high)
+    avcc[2] = 0;        // constraints (default to none)
+    avcc[3] = 0xd;      // level (default to 1.3)
     avcc[4] = 0xff;     // reserved+size
 
     size_t i = 0;
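
findNextNalStartCode is what lets the rewritten loops in the following hunks jump from parameter set to parameter set instead of scanning byte-by-byte. A short sketch of splitting an Annex-B buffer with it (the helper is the one defined above; handleNalUnit is a hypothetical per-NAL callback):

```cpp
// Sketch: split an Annex-B blob (4-byte start codes) into individual NAL units.
// Assumes the buffer begins with "\x00\x00\x00\x01", as csd-0/csd-1 do here.
const uint8_t *p = data + 4;
const uint8_t *end = data + size;
while (p < end) {
    // Returns &p[remaining] when no further start code exists.
    const uint8_t *next = findNextNalStartCode(p, end - p);
    handleNalUnit(p, next - p);  // hypothetical per-NAL consumer
    p = next + 4;                // step over the next start code
}
```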
@@ -471,26 +1082,28 @@
     int lastparamoffset = 0;
     int avccidx = 6;
     do {
-        if (i >= csd0->size() - 4 ||
-                memcmp(csd0->data() + i, "\x00\x00\x00\x01", 4) == 0) {
-            if (i >= csd0->size() - 4) {
-                // there can't be another param here, so use all the rest
-                i = csd0->size();
+        i = findNextNalStartCode(csd0->data() + i, csd0->size() - i) - csd0->data();
+        ALOGV("block at %zu, last was %d", i, lastparamoffset);
+        if (lastparamoffset > 0) {
+            const uint8_t *lastparam = csd0->data() + lastparamoffset;
+            int size = i - lastparamoffset;
+            if (size > 3) {
+                if (numparams && memcmp(avcc + 1, lastparam + 1, 3)) {
+                    ALOGW("Inconsisted profile/level found in SPS: %x,%x,%x vs %x,%x,%x",
+                            avcc[1], avcc[2], avcc[3], lastparam[1], lastparam[2], lastparam[3]);
+                } else if (!numparams) {
+                    // fill in profile, constraints and level
+                    memcpy(avcc + 1, lastparam + 1, 3);
+                }
             }
-            ALOGV("block at %zu, last was %d", i, lastparamoffset);
-            if (lastparamoffset > 0) {
-                int size = i - lastparamoffset;
-                avcc[avccidx++] = size >> 8;
-                avcc[avccidx++] = size & 0xff;
-                memcpy(avcc+avccidx, csd0->data() + lastparamoffset, size);
-                avccidx += size;
-                numparams++;
-            }
-            i += 4;
-            lastparamoffset = i;
-        } else {
-            i++;
+            avcc[avccidx++] = size >> 8;
+            avcc[avccidx++] = size & 0xff;
+            memcpy(avcc+avccidx, lastparam, size);
+            avccidx += size;
+            numparams++;
         }
+        i += 4;
+        lastparamoffset = i;
     } while(i < csd0->size());
     ALOGV("csd0 contains %d params", numparams);
 
@@ -502,26 +1115,18 @@
     int numpicparamsoffset = avccidx;
     avccidx++;
     do {
-        if (i >= csd1->size() - 4 ||
-                memcmp(csd1->data() + i, "\x00\x00\x00\x01", 4) == 0) {
-            if (i >= csd1->size() - 4) {
-                // there can't be another param here, so use all the rest
-                i = csd1->size();
-            }
-            ALOGV("block at %zu, last was %d", i, lastparamoffset);
-            if (lastparamoffset > 0) {
-                int size = i - lastparamoffset;
-                avcc[avccidx++] = size >> 8;
-                avcc[avccidx++] = size & 0xff;
-                memcpy(avcc+avccidx, csd1->data() + lastparamoffset, size);
-                avccidx += size;
-                numparams++;
-            }
-            i += 4;
-            lastparamoffset = i;
-        } else {
-            i++;
+        i = findNextNalStartCode(csd1->data() + i, csd1->size() - i) - csd1->data();
+        ALOGV("block at %zu, last was %d", i, lastparamoffset);
+        if (lastparamoffset > 0) {
+            int size = i - lastparamoffset;
+            avcc[avccidx++] = size >> 8;
+            avcc[avccidx++] = size & 0xff;
+            memcpy(avcc+avccidx, csd1->data() + lastparamoffset, size);
+            avccidx += size;
+            numparams++;
         }
+        i += 4;
+        lastparamoffset = i;
     } while(i < csd1->size());
     avcc[numpicparamsoffset] = numparams;
     return avccidx;
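
For orientation, the fixed prefix this function emits is the start of an ISO/IEC 14496-15 AVCDecoderConfigurationRecord; the defaults above are only placeholders until the first SPS overwrites bytes 1-3:

```cpp
// avcC layout produced by reassembleAVCC:
//   avcc[0] = 1;    // configurationVersion
//   avcc[1] = 0x64; // AVCProfileIndication   (0x64 = High profile)
//   avcc[2] = 0;    // profile_compatibility  (constraint_set flags)
//   avcc[3] = 0x0d; // AVCLevelIndication     (13 -> Level 1.3)
//   avcc[4] = 0xff; // 6 reserved '1' bits | lengthSizeMinusOne = 3 (4-byte NAL lengths)
//   avcc[5]         // 3 reserved bits | SPS count (filled in later in the function)
// ...followed, per parameter set, by a 16-bit big-endian size and the raw NAL bytes,
// which is exactly what the two loops above and below append.
```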
@@ -545,15 +1150,16 @@
     esds[11] = 0x80 | ((configdescriptorsize >> 7) & 0x7f);
     esds[12] = (configdescriptorsize & 0x7f);
     esds[13] = 0x40; // objectTypeIndication
-    esds[14] = 0x15; // not sure what 14-25 mean, they are ignored by ESDS.cpp,
-    esds[15] = 0x00; // but the actual values here were taken from a real file.
+    // Bytes 14-25 are example values taken from a real file; they are unused/overwritten by muxers.
+    esds[14] = 0x15; // streamType(5 = AudioStream), upStream(0), reserved(1)
+    esds[15] = 0x00; // 15-17: bufferSizeDB (6KB)
     esds[16] = 0x18;
     esds[17] = 0x00;
-    esds[18] = 0x00;
+    esds[18] = 0x00; // 18-21: maxBitrate (64kbps)
     esds[19] = 0x00;
     esds[20] = 0xfa;
     esds[21] = 0x00;
-    esds[22] = 0x00;
+    esds[22] = 0x00; // 22-25: avgBitrate (64kbps)
     esds[23] = 0x00;
     esds[24] = 0xfa;
     esds[25] = 0x00;
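
The blob being filled in is a nest of MPEG-4 systems descriptors (ISO/IEC 14496-1), each a tag byte followed by a 7-bits-per-byte expandable size. A compact map of the offsets the function writes, consistent with the new comments above:

```cpp
// ESDS layout produced by reassembleESDS:
//   [0]       0x03      ES_DescrTag
//   [1..4]    size      (expandable: 0x80|b7 three times, then the low 7 bits)
//   [5..6]    ES_ID     (zero here)
//   [7]       flags/streamPriority (zero here)
//   [8]       0x04      DecoderConfigDescrTag
//   [9..12]   size
//   [13]      0x40      objectTypeIndication = MPEG-4 Audio
//   [14]      0x15      streamType(5 = AudioStream)<<2 | upStream(0)<<1 | reserved(1)
//   [15..17]  bufferSizeDB = 0x001800 (6 KiB)
//   [18..21]  maxBitrate   = 0x0000fa00 (64000 bps)
//   [22..25]  avgBitrate   = 0x0000fa00 (64000 bps)
//   [26]      0x05      DecSpecificInfoTag, then its size and the raw csd-0 bytes
```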
@@ -564,7 +1170,80 @@
     esds[30] = (csd0size & 0x7f);
     memcpy((void*)&esds[31], csd0->data(), csd0size);
     // data following this is ignored, so don't bother appending it
+}
 
+static size_t reassembleHVCC(const sp<ABuffer> &csd0, uint8_t *hvcc, size_t hvccSize, size_t nalSizeLength) {
+    HevcParameterSets paramSets;
+    uint8_t* data = csd0->data();
+    if (csd0->size() < 4) {
+        ALOGE("csd0 too small");
+        return 0;
+    }
+    if (memcmp(data, "\x00\x00\x00\x01", 4) != 0) {
+        ALOGE("csd0 doesn't start with a start code");
+        return 0;
+    }
+    size_t prevNalOffset = 4;
+    status_t err = OK;
+    for (size_t i = 1; i < csd0->size() - 4; ++i) {
+        if (memcmp(&data[i], "\x00\x00\x00\x01", 4) != 0) {
+            continue;
+        }
+        err = paramSets.addNalUnit(&data[prevNalOffset], i - prevNalOffset);
+        if (err != OK) {
+            return 0;
+        }
+        prevNalOffset = i + 4;
+    }
+    err = paramSets.addNalUnit(&data[prevNalOffset], csd0->size() - prevNalOffset);
+    if (err != OK) {
+        return 0;
+    }
+    size_t size = hvccSize;
+    err = paramSets.makeHvcc(hvcc, &size, nalSizeLength);
+    if (err != OK) {
+        return 0;
+    }
+    return size;
+}
+
+#if 0
+static void convertMessageToMetaDataInt32(
+        const sp<AMessage> &msg, sp<MetaData> &meta, uint32_t key, const char *name) {
+    int32_t value;
+    if (msg->findInt32(name, &value)) {
+        meta->setInt32(key, value);
+    }
+}
+#endif
+
+static void convertMessageToMetaDataColorAspects(const sp<AMessage> &msg, sp<MetaData> &meta) {
+    // 0 values are unspecified
+    int32_t range = 0, standard = 0, transfer = 0;
+    (void)msg->findInt32("color-range", &range);
+    (void)msg->findInt32("color-standard", &standard);
+    (void)msg->findInt32("color-transfer", &transfer);
+
+    ColorAspects colorAspects;
+    memset(&colorAspects, 0, sizeof(colorAspects));
+    if (CodecBase::convertPlatformColorAspectsToCodecAspects(
+            range, standard, transfer, colorAspects) != OK) {
+        return;
+    }
+
+    // save specified values to meta
+    if (colorAspects.mRange != 0) {
+        meta->setInt32(kKeyColorRange, colorAspects.mRange);
+    }
+    if (colorAspects.mPrimaries != 0) {
+        meta->setInt32(kKeyColorPrimaries, colorAspects.mPrimaries);
+    }
+    if (colorAspects.mTransfer != 0) {
+        meta->setInt32(kKeyTransferFunction, colorAspects.mTransfer);
+    }
+    if (colorAspects.mMatrixCoeffs != 0) {
+        meta->setInt32(kKeyColorMatrix, colorAspects.mMatrixCoeffs);
+    }
 }
 
 void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
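
convertMessageToMetaDataColorAspects above deliberately skips unspecified (zero) values so they can never clobber extractor-provided defaults in the meta. A hedged usage sketch (the keys are the platform color-aspect message keys used throughout this file; the constant 2 for limited range follows MediaFormat and is illustrative):

```cpp
// Sketch: only explicitly specified color aspects survive the conversion.
sp<AMessage> format = new AMessage;
format->setInt32("color-range", 2);  // e.g. limited range
// "color-standard" and "color-transfer" left unset -> treated as unspecified

sp<MetaData> meta = new MetaData;
convertMessageToMetaDataColorAspects(format, meta);
// meta now carries kKeyColorRange only; the primaries/transfer/matrix keys stay
// absent, so downstream consumers keep whatever the container declared.
```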
@@ -585,6 +1264,15 @@
         meta->setInt32(kKeyIsSyncFrame, 1);
     }
 
+    int32_t avgBitrate = 0;
+    int32_t maxBitrate;
+    if (msg->findInt32("bitrate", &avgBitrate) && avgBitrate > 0) {
+        meta->setInt32(kKeyBitRate, avgBitrate);
+    }
+    if (msg->findInt32("max-bitrate", &maxBitrate) && maxBitrate > 0 && maxBitrate >= avgBitrate) {
+        meta->setInt32(kKeyMaxBitRate, maxBitrate);
+    }
+
     if (mime.startsWith("video/")) {
         int32_t width;
         int32_t height;
@@ -620,6 +1308,15 @@
         if (msg->findInt32("rotation-degrees", &rotationDegrees)) {
             meta->setInt32(kKeyRotation, rotationDegrees);
         }
+
+        if (msg->contains("hdr-static-info")) {
+            HDRStaticInfo info;
+            if (ColorUtils::getHDRStaticInfoFromFormat(msg, &info)) {
+                meta->setData(kKeyHdrStaticInfo, 'hdrS', &info, sizeof(info));
+            }
+        }
+
+        convertMessageToMetaDataColorAspects(msg, meta);
     } else if (mime.startsWith("audio/")) {
         int32_t numChannels;
         if (msg->findInt32("channel-count", &numChannels)) {
@@ -646,6 +1343,11 @@
         if (msg->findInt32("is-adts", &isADTS)) {
             meta->setInt32(kKeyIsADTS, isADTS);
         }
+
+        int32_t pcmEncoding;
+        if (msg->findInt32("pcm-encoding", &pcmEncoding)) {
+            meta->setInt32(kKeyPcmEncoding, pcmEncoding);
+        }
     }
 
     int32_t maxInputSize;
@@ -664,36 +1366,50 @@
     }
 
     int32_t fps;
+    float fpsFloat;
     if (msg->findInt32("frame-rate", &fps) && fps > 0) {
         meta->setInt32(kKeyFrameRate, fps);
+    } else if (msg->findFloat("frame-rate", &fpsFloat)
+            && fpsFloat >= 1 && fpsFloat <= INT32_MAX) {
+        // truncate (rather than round) so that e.g. 23.976 fps stays distinguishable from 24
+        meta->setInt32(kKeyFrameRate, (int32_t)fpsFloat);
     }
 
     // reassemble the csd data into its original form
-    sp<ABuffer> csd0;
+    sp<ABuffer> csd0, csd1, csd2;
     if (msg->findBuffer("csd-0", &csd0)) {
         int csd0size = csd0->size();
         if (mime == MEDIA_MIMETYPE_VIDEO_AVC) {
             sp<ABuffer> csd1;
             if (msg->findBuffer("csd-1", &csd1)) {
-                Vector<char> avcc;
-                int avccSize = csd0size + csd1->size() + 1024;
-                if (avcc.resize(avccSize) < 0) {
-                    ALOGE("error allocating avcc (size %d); abort setting avcc.", avccSize);
-                } else {
-                    size_t outsize = reassembleAVCC(csd0, csd1, avcc.editArray());
-                    meta->setData(kKeyAVCC, kKeyAVCC, avcc.array(), outsize);
-                }
+                std::vector<char> avcc(csd0size + csd1->size() + 1024);
+                size_t outsize = reassembleAVCC(csd0, csd1, avcc.data());
+                meta->setData(kKeyAVCC, kKeyAVCC, avcc.data(), outsize);
             }
         } else if (mime == MEDIA_MIMETYPE_AUDIO_AAC || mime == MEDIA_MIMETYPE_VIDEO_MPEG4) {
-            Vector<char> esds;
-            int esdsSize = csd0size + 31;
-            if (esds.resize(esdsSize) < 0) {
-                ALOGE("error allocating esds (size %d); abort setting esds.", esdsSize);
-            } else {
-                // The written ESDS is actually for an audio stream, but it's enough
-                // for transporting the CSD to muxers.
-                reassembleESDS(csd0, esds.editArray());
-                meta->setData(kKeyESDS, kKeyESDS, esds.array(), esds.size());
+            std::vector<char> esds(csd0size + 31);
+            // The written ESDS is actually for an audio stream, but it's enough
+            // for transporting the CSD to muxers.
+            reassembleESDS(csd0, esds.data());
+            meta->setData(kKeyESDS, kKeyESDS, esds.data(), esds.size());
+        } else if (mime == MEDIA_MIMETYPE_VIDEO_HEVC) {
+            std::vector<uint8_t> hvcc(csd0size + 1024);
+            size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
+            meta->setData(kKeyHVCC, kKeyHVCC, hvcc.data(), outsize);
+        } else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
+            meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
+        } else if (mime == MEDIA_MIMETYPE_AUDIO_OPUS) {
+            meta->setData(kKeyOpusHeader, 0, csd0->data(), csd0->size());
+            if (msg->findBuffer("csd-1", &csd1)) {
+                meta->setData(kKeyOpusCodecDelay, 0, csd1->data(), csd1->size());
+            }
+            if (msg->findBuffer("csd-2", &csd2)) {
+                meta->setData(kKeyOpusSeekPreRoll, 0, csd2->data(), csd2->size());
+            }
+        } else if (mime == MEDIA_MIMETYPE_AUDIO_VORBIS) {
+            meta->setData(kKeyVorbisInfo, 0, csd0->data(), csd0->size());
+            if (msg->findBuffer("csd-1", &csd1)) {
+                meta->setData(kKeyVorbisBooks, 0, csd1->data(), csd1->size());
             }
         }
     }
@@ -886,7 +1602,7 @@
     int32_t brate = -1;
     if (!meta->findInt32(kKeyBitRate, &brate)) {
         ALOGV("track of type '%s' does not publish bitrate", mime);
-     }
+    }
     info.bit_rate = brate;
 
 
@@ -1017,5 +1733,37 @@
     *sync = settings;
 }
 
+AString nameForFd(int fd) {
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    AString result;
+    snprintf(buffer, SIZE, "/proc/%d/fd/%d", getpid(), fd);
+    struct stat s;
+    if (lstat(buffer, &s) == 0) {
+        if ((s.st_mode & S_IFMT) == S_IFLNK) {
+            char linkto[256];
+            int len = readlink(buffer, linkto, sizeof(linkto));
+            if (len > 0) {
+                if (len > 255) {
+                    linkto[252] = '.';
+                    linkto[253] = '.';
+                    linkto[254] = '.';
+                    linkto[255] = 0;
+                } else {
+                    linkto[len] = 0;
+                }
+                result.append(linkto);
+            }
+        } else {
+            result.append("unexpected type for ");
+            result.append(buffer);
+        }
+    } else {
+        result.append("couldn't open ");
+        result.append(buffer);
+    }
+    return result;
+}
+
 }  // namespace android
 
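nameForFd works by reading the /proc/<pid>/fd/<fd> symlink, which the kernel keeps pointing at whatever the descriptor currently references. The same trick in a self-contained form, using only standard POSIX calls (error handling condensed; Linux-specific):

```cpp
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

// Sketch: resolve a file descriptor back to a path via procfs.
static void printFdPath(int fd) {
    char link[64];
    snprintf(link, sizeof(link), "/proc/%d/fd/%d", getpid(), fd);
    char path[PATH_MAX];
    ssize_t len = readlink(link, path, sizeof(path) - 1);  // does not NUL-terminate
    if (len >= 0) {
        path[len] = '\0';
        printf("fd %d -> %s\n", fd, path);
    }
}
```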
diff --git a/media/libstagefright/VBRISeeker.cpp b/media/libstagefright/VBRISeeker.cpp
index 8a0fcac..58f2c60 100644
--- a/media/libstagefright/VBRISeeker.cpp
+++ b/media/libstagefright/VBRISeeker.cpp
@@ -149,7 +149,7 @@
 }
 
 bool VBRISeeker::getOffsetForTime(int64_t *timeUs, off64_t *pos) {
-    if (mDurationUs < 0) {
+    if (mDurationUs < 0 || mSegments.size() == 0) {
         return false;
     }
 
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 5fe9bf9..03226c7 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -156,12 +156,12 @@
         lastTime = time;
     }
 
-    int64_t div   = numSamplesToUse * sumXX - sumX * sumX;
+    int64_t div   = (int64_t)numSamplesToUse * sumXX - sumX * sumX;
     if (div == 0) {
         return false;
     }
 
-    int64_t a_nom = numSamplesToUse * sumXY - sumX * sumY;
+    int64_t a_nom = (int64_t)numSamplesToUse * sumXY - sumX * sumY;
     int64_t b_nom = sumXX * sumY            - sumX * sumXY;
     *a = divRound(a_nom, div);
     *b = divRound(b_nom, div);
@@ -437,10 +437,10 @@
                 (renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod;
             edgeRemainder += (videoPeriod * i) % mVsyncPeriod;
         }
-        mTimeCorrection += mVsyncPeriod / 2 - offset / N;
+        mTimeCorrection += mVsyncPeriod / 2 - offset / (nsecs_t)N;
         renderTime += mTimeCorrection;
         nsecs_t correctionLimit = mVsyncPeriod * 3 / 5;
-        edgeRemainder = abs(edgeRemainder / N - mVsyncPeriod / 2);
+        edgeRemainder = abs(edgeRemainder / (nsecs_t)N - mVsyncPeriod / 2);
         if (edgeRemainder <= mVsyncPeriod / 3) {
             correctionLimit /= 2;
         }
@@ -460,14 +460,16 @@
                 mTimeCorrection -= mVsyncPeriod / 2;
                 renderTime -= mVsyncPeriod / 2;
                 nextVsyncTime -= mVsyncPeriod;
-                --vsyncsForLastFrame;
+                if (vsyncsForLastFrame > 0)
+                    --vsyncsForLastFrame;
             } else if (mTimeCorrection < -correctionLimit &&
                     (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame == minVsyncsPerFrame)) {
                 // add a VSYNC
                 mTimeCorrection += mVsyncPeriod / 2;
                 renderTime += mVsyncPeriod / 2;
                 nextVsyncTime += mVsyncPeriod;
-                ++vsyncsForLastFrame;
+                if (vsyncsForLastFrame < ULONG_MAX)
+                    ++vsyncsForLastFrame;
             }
             ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
         }
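
The casts in the two VideoFrameScheduler hunks widen the sample count to 64 bits before it multiplies the sums; the expressions themselves are the standard least-squares normal equations for fitting time = a·frame + b. A minimal sketch of the same fit (plain truncating division here, where the original uses its divRound helper):

```cpp
#include <cstddef>
#include <cstdint>

// Least-squares line fit y = a*x + b over n samples, all math in 64 bits:
//   a = (n*Sxy - Sx*Sy) / (n*Sxx - Sx*Sx)
//   b = (Sxx*Sy - Sx*Sxy) / (n*Sxx - Sx*Sx)
static bool fitLine(const int64_t *x, const int64_t *y, size_t n,
                    int64_t *a, int64_t *b) {
    int64_t sumX = 0, sumY = 0, sumXX = 0, sumXY = 0;
    for (size_t i = 0; i < n; ++i) {
        sumX += x[i];
        sumY += y[i];
        sumXX += x[i] * x[i];
        sumXY += x[i] * y[i];
    }
    int64_t div = (int64_t)n * sumXX - sumX * sumX;  // widen n, as in the patch
    if (div == 0) {
        return false;  // degenerate: all x values identical
    }
    *a = ((int64_t)n * sumXY - sumX * sumY) / div;
    *b = (sumXX * sumY - sumX * sumXY) / div;
    return true;
}
```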
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 335ac84..38a2a06 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -20,6 +20,7 @@
 
 #include "include/WAVExtractor.h"
 
+#include <audio_utils/primitives.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBufferGroup.h>
@@ -36,6 +37,7 @@
 
 enum {
     WAVE_FORMAT_PCM        = 0x0001,
+    WAVE_FORMAT_IEEE_FLOAT = 0x0003,
     WAVE_FORMAT_ALAW       = 0x0006,
     WAVE_FORMAT_MULAW      = 0x0007,
     WAVE_FORMAT_MSGSM      = 0x0031,
@@ -116,7 +118,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-sp<MediaSource> WAVExtractor::getTrack(size_t index) {
+sp<IMediaSource> WAVExtractor::getTrack(size_t index) {
     if (mInitCheck != OK || index > 0) {
         return NULL;
     }
@@ -177,6 +179,7 @@
 
             mWaveFormat = U16_LE_AT(formatSpec);
             if (mWaveFormat != WAVE_FORMAT_PCM
+                    && mWaveFormat != WAVE_FORMAT_IEEE_FLOAT
                     && mWaveFormat != WAVE_FORMAT_ALAW
                     && mWaveFormat != WAVE_FORMAT_MULAW
                     && mWaveFormat != WAVE_FORMAT_MSGSM
@@ -193,15 +196,17 @@
             }
 
             mNumChannels = U16_LE_AT(&formatSpec[2]);
+
+            if (mNumChannels < 1 || mNumChannels > 8) {
+                ALOGE("Unsupported number of channels (%d)", mNumChannels);
+                return ERROR_UNSUPPORTED;
+            }
+
             if (mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
                 if (mNumChannels != 1 && mNumChannels != 2) {
                     ALOGW("More than 2 channels (%d) in non-WAVE_EXT, unknown channel mask",
                             mNumChannels);
                 }
-            } else {
-                if (mNumChannels < 1 && mNumChannels > 8) {
-                    return ERROR_UNSUPPORTED;
-                }
             }
 
             mSampleRate = U32_LE_AT(&formatSpec[4]);
@@ -212,24 +217,6 @@
 
             mBitsPerSample = U16_LE_AT(&formatSpec[14]);
 
-            if (mWaveFormat == WAVE_FORMAT_PCM
-                    || mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
-                if (mBitsPerSample != 8 && mBitsPerSample != 16
-                    && mBitsPerSample != 24) {
-                    return ERROR_UNSUPPORTED;
-                }
-            } else if (mWaveFormat == WAVE_FORMAT_MSGSM) {
-                if (mBitsPerSample != 0) {
-                    return ERROR_UNSUPPORTED;
-                }
-            } else {
-                CHECK(mWaveFormat == WAVE_FORMAT_MULAW
-                        || mWaveFormat == WAVE_FORMAT_ALAW);
-                if (mBitsPerSample != 8) {
-                    return ERROR_UNSUPPORTED;
-                }
-            }
-
             if (mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
                 uint16_t validBitsPerSample = U16_LE_AT(&formatSpec[18]);
                 if (validBitsPerSample != mBitsPerSample) {
@@ -261,17 +248,34 @@
                 // In a WAVE_EXT header, the first two bytes of the GUID stored at byte 24 contain
                 // the sample format, using the same definitions as a regular WAV header
                 mWaveFormat = U16_LE_AT(&formatSpec[24]);
-                if (mWaveFormat != WAVE_FORMAT_PCM
-                        && mWaveFormat != WAVE_FORMAT_ALAW
-                        && mWaveFormat != WAVE_FORMAT_MULAW) {
-                    return ERROR_UNSUPPORTED;
-                }
                 if (memcmp(&formatSpec[26], WAVEEXT_SUBFORMAT, 14)) {
                     ALOGE("unsupported GUID");
                     return ERROR_UNSUPPORTED;
                 }
             }
 
+            if (mWaveFormat == WAVE_FORMAT_PCM) {
+                if (mBitsPerSample != 8 && mBitsPerSample != 16
+                    && mBitsPerSample != 24 && mBitsPerSample != 32) {
+                    return ERROR_UNSUPPORTED;
+                }
+            } else if (mWaveFormat == WAVE_FORMAT_IEEE_FLOAT) {
+                if (mBitsPerSample != 32) {  // TODO we don't support double
+                    return ERROR_UNSUPPORTED;
+                }
+            } else if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+                if (mBitsPerSample != 0) {
+                    return ERROR_UNSUPPORTED;
+                }
+            } else if (mWaveFormat == WAVE_FORMAT_MULAW || mWaveFormat == WAVE_FORMAT_ALAW) {
+                if (mBitsPerSample != 8) {
+                    return ERROR_UNSUPPORTED;
+                }
+            } else {
+                return ERROR_UNSUPPORTED;
+            }
+
             mValidFormat = true;
         } else if (!memcmp(chunkHeader, "data", 4)) {
             if (mValidFormat) {
@@ -282,6 +286,7 @@
 
                 switch (mWaveFormat) {
                     case WAVE_FORMAT_PCM:
+                    case WAVE_FORMAT_IEEE_FLOAT:
                         mTrackMeta->setCString(
                                 kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
                         break;
@@ -303,6 +308,7 @@
                 mTrackMeta->setInt32(kKeyChannelCount, mNumChannels);
                 mTrackMeta->setInt32(kKeyChannelMask, mChannelMask);
                 mTrackMeta->setInt32(kKeySampleRate, mSampleRate);
+                mTrackMeta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
 
                 int64_t durationUs = 0;
                 if (mWaveFormat == WAVE_FORMAT_MSGSM) {
@@ -311,9 +317,17 @@
                         1000000LL * (mDataSize / 65 * 320) / 8000;
                 } else {
                     size_t bytesPerSample = mBitsPerSample >> 3;
+
+                    if (!bytesPerSample || !mNumChannels)
+                        return ERROR_MALFORMED;
+
+                    size_t num_samples = mDataSize / (mNumChannels * bytesPerSample);
+
+                    if (!mSampleRate)
+                        return ERROR_MALFORMED;
+
                     durationUs =
-                        1000000LL * (mDataSize / (mNumChannels * bytesPerSample))
-                            / mSampleRate;
+                        1000000LL * num_samples / mSampleRate;
                 }
 
                 mTrackMeta->setInt64(kKeyDuration, durationUs);
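
With the divide-by-zero guards in place, the duration math reduces to frame count divided by sample rate. A worked example for the PCM branch (illustrative numbers), with the MSGSM case above for contrast:

```cpp
// Example: 16-bit stereo PCM at 44100 Hz with a 176400-byte data chunk.
//   bytesPerSample = 16 >> 3                         = 2
//   num_samples    = 176400 / (2 channels * 2 bytes) = 44100 frames
//   durationUs     = 1000000 * 44100 / 44100         = 1000000 us (one second)
// MSGSM instead counts 65-byte blocks of 320 samples at a fixed 8000 Hz:
//   durationUs     = 1000000 * (mDataSize / 65 * 320) / 8000
```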
@@ -465,46 +479,39 @@
 
     buffer->set_range(0, n);
 
-    if (mWaveFormat == WAVE_FORMAT_PCM || mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
+    // TODO: add capability to return data as float PCM instead of 16 bit PCM.
+    if (mWaveFormat == WAVE_FORMAT_PCM) {
         if (mBitsPerSample == 8) {
             // Convert 8-bit unsigned samples to 16-bit signed.
 
+            // Create new buffer with 2 byte wide samples
             MediaBuffer *tmp;
             CHECK_EQ(mGroup->acquire_buffer(&tmp), (status_t)OK);
-
-            // The new buffer holds the sample number of samples, but each
-            // one is 2 bytes wide.
             tmp->set_range(0, 2 * n);
 
-            int16_t *dst = (int16_t *)tmp->data();
-            const uint8_t *src = (const uint8_t *)buffer->data();
-            ssize_t numBytes = n;
-
-            while (numBytes-- > 0) {
-                *dst++ = ((int16_t)(*src) - 128) * 256;
-                ++src;
-            }
-
+            memcpy_to_i16_from_u8((int16_t *)tmp->data(), (const uint8_t *)buffer->data(), n);
             buffer->release();
             buffer = tmp;
         } else if (mBitsPerSample == 24) {
-            // Convert 24-bit signed samples to 16-bit signed.
+            // Convert 24-bit signed samples to 16-bit signed in place
+            const size_t numSamples = n / 3;
 
-            const uint8_t *src =
-                (const uint8_t *)buffer->data() + buffer->range_offset();
-            int16_t *dst = (int16_t *)src;
+            memcpy_to_i16_from_p24((int16_t *)buffer->data(), (const uint8_t *)buffer->data(), numSamples);
+            buffer->set_range(0, 2 * numSamples);
+        } else if (mBitsPerSample == 32) {
+            // Convert 32-bit signed samples to 16-bit signed in place
+            const size_t numSamples = n / 4;
 
-            size_t numSamples = buffer->range_length() / 3;
-            for (size_t i = 0; i < numSamples; ++i) {
-                int32_t x = (int32_t)(src[0] | src[1] << 8 | src[2] << 16);
-                x = (x << 8) >> 8;  // sign extension
+            memcpy_to_i16_from_i32((int16_t *)buffer->data(), (const int32_t *)buffer->data(), numSamples);
+            buffer->set_range(0, 2 * numSamples);
+        }
+    } else if (mWaveFormat == WAVE_FORMAT_IEEE_FLOAT) {
+        if (mBitsPerSample == 32) {
+            // Convert 32-bit float samples to 16-bit signed in place
+            const size_t numSamples = n / 4;
 
-                x = x >> 8;
-                *dst++ = (int16_t)x;
-                src += 3;
-            }
-
-            buffer->set_range(buffer->range_offset(), 2 * numSamples);
+            memcpy_to_i16_from_float((int16_t *)buffer->data(), (const float *)buffer->data(), numSamples);
+            buffer->set_range(0, 2 * numSamples);
         }
     }
 
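
The rewritten read path delegates all sample-format conversion to libaudioutils primitives. For reference, naive per-sample equivalents of the routines used above (the real memcpy_to_i16_from_* implementations in system/media/audio_utils also handle rounding and saturation and may be vectorized; these sketches only document the mapping):

```cpp
#include <cstdint>

// Per-sample equivalents of the audio_utils conversions used above.
static inline int16_t i16_from_u8(uint8_t u) {
    return (int16_t)((u - 0x80) << 8);     // 8-bit unsigned -> 16-bit signed
}
static inline int16_t i16_from_p24(const uint8_t *p) {
    return (int16_t)(p[1] | (p[2] << 8));  // packed LE 24-bit: keep top 16 bits
}
static inline int16_t i16_from_i32(int32_t s) {
    return (int16_t)(s >> 16);             // 32-bit signed: keep top 16 bits
}
static inline int16_t i16_from_float(float f) {
    if (f >= 1.0f) return INT16_MAX;       // clamp, then scale
    if (f < -1.0f) return INT16_MIN;
    return (int16_t)(f * 32768.0f);
}
```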
diff --git a/media/libstagefright/WVMExtractor.cpp b/media/libstagefright/WVMExtractor.cpp
index bc48272..d1b2f54 100644
--- a/media/libstagefright/WVMExtractor.cpp
+++ b/media/libstagefright/WVMExtractor.cpp
@@ -95,7 +95,7 @@
     return (mImpl != NULL) ? mImpl->countTracks() : 0;
 }
 
-sp<MediaSource> WVMExtractor::getTrack(size_t index) {
+sp<IMediaSource> WVMExtractor::getTrack(size_t index) {
     if (mImpl == NULL) {
         return NULL;
     }
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index 8ef2dca..ccf3440 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -41,10 +41,37 @@
     return x + (1u << numZeroes) - 1;
 }
 
+unsigned parseUEWithFallback(ABitReader *br, unsigned fallback) {
+    unsigned numZeroes = 0;
+    while (br->getBitsWithFallback(1, 1) == 0) {
+        ++numZeroes;
+    }
+    uint32_t x;
+    if (numZeroes < 32) {
+        if (br->getBitsGraceful(numZeroes, &x)) {
+            return x + (1u << numZeroes) - 1;
+        } else {
+            return fallback;
+        }
+    } else {
+        br->skipBits(numZeroes);
+        return fallback;
+    }
+}
+
 signed parseSE(ABitReader *br) {
     unsigned codeNum = parseUE(br);
 
-    return (codeNum & 1) ? (codeNum + 1) / 2 : -(codeNum / 2);
+    return (codeNum & 1) ? (codeNum + 1) / 2 : -signed(codeNum / 2);
+}
+
+signed parseSEWithFallback(ABitReader *br, signed fallback) {
+    // NOTE: parseUE cannot normally return ~0 as the max supported value is 0xFFFE
+    unsigned codeNum = parseUEWithFallback(br, ~0U);
+    if (codeNum == ~0U) {
+        return fallback;
+    }
+    return (codeNum & 1) ? (codeNum + 1) / 2 : -signed(codeNum / 2);
 }
 
 static void skipScalingList(ABitReader *br, size_t sizeOfScalingList) {
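
The new *WithFallback variants decode the same Exp-Golomb codes as parseUE/parseSE but return a caller-supplied fallback instead of asserting when the SPS is truncated. For orientation, the code mapping with a worked example:

```cpp
// Exp-Golomb refresher (H.264 ue(v)/se(v)):
//   bitstring "00101": two leading zeroes, a '1' marker, then 2 suffix bits "01" = 1
//   ue(v) = suffix + 2^zeroes - 1 = 1 + 4 - 1 = 4
//   se(v) maps codeNum 0,1,2,3,4,... onto 0,+1,-1,+2,-2,...
//   so codeNum 4 decodes to -2, matching (4 & 1) ? (4+1)/2 : -signed(4/2) above.
```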
diff --git a/media/libstagefright/codecs/aacdec/Android.mk b/media/libstagefright/codecs/aacdec/Android.mk
index afb00aa..84ea708 100644
--- a/media/libstagefright/codecs/aacdec/Android.mk
+++ b/media/libstagefright/codecs/aacdec/Android.mk
@@ -19,6 +19,8 @@
 LOCAL_CFLAGS :=
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 8ddff90..ff76bc8 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -26,6 +26,7 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MediaErrors.h>
+#include <utils/misc.h>
 
 #include <math.h>
 
@@ -55,6 +56,14 @@
     params->nVersion.s.nStep = 0;
 }
 
+static const OMX_U32 kSupportedProfiles[] = {
+    OMX_AUDIO_AACObjectLC,
+    OMX_AUDIO_AACObjectHE,
+    OMX_AUDIO_AACObjectHE_PS,
+    OMX_AUDIO_AACObjectLD,
+    OMX_AUDIO_AACObjectELD,
+};
+
 SoftAAC2::SoftAAC2(
         const char *name,
         const OMX_CALLBACKTYPE *callbacks,
@@ -198,12 +207,16 @@
         mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET, DRC_DEFAULT_MOBILE_ENC_LEVEL);
     }
 
+    // By default, the decoder creates a 5.1 channel downmix signal.
+    // Pass -1 to enable 6.1 and 7.1 channel output for seven- and eight-channel input streams.
+    aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, -1);
+
     return status;
 }
 
 OMX_ERRORTYPE SoftAAC2::internalGetParameter(
         OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
+    switch ((OMX_U32) index) {
         case OMX_IndexParamAudioAac:
         {
             OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
@@ -279,6 +292,29 @@
             return OMX_ErrorNone;
         }
 
+        case OMX_IndexParamAudioProfileQuerySupported:
+        {
+            OMX_AUDIO_PARAM_ANDROID_PROFILETYPE *profileParams =
+                (OMX_AUDIO_PARAM_ANDROID_PROFILETYPE *)params;
+
+            if (!isValidOMXParam(profileParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (profileParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (profileParams->nProfileIndex >= NELEM(kSupportedProfiles)) {
+                return OMX_ErrorNoMore;
+            }
+
+            profileParams->eProfile =
+                kSupportedProfiles[profileParams->nProfileIndex];
+
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::internalGetParameter(index, params);
     }
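
OMX_IndexParamAudioProfileQuerySupported is an index-based enumeration: the caller bumps nProfileIndex until the component answers OMX_ErrorNoMore. A hedged client-side sketch (component is an assumed OMX_HANDLETYPE; InitOMXParams is the helper from this file):

```cpp
// Sketch: enumerate every AAC profile the decoder reports as supported.
OMX_AUDIO_PARAM_ANDROID_PROFILETYPE param;
InitOMXParams(&param);
param.nPortIndex = 0;  // the decoder above only answers for its input port
for (OMX_U32 i = 0; ; ++i) {
    param.nProfileIndex = i;
    OMX_ERRORTYPE err = OMX_GetParameter(
            component,
            (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported,
            &param);
    if (err != OMX_ErrorNone) {
        break;  // OMX_ErrorNoMore once the list is exhausted
    }
    ALOGI("supported AAC profile: %u", (unsigned)param.eProfile);
}
```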
@@ -341,7 +377,7 @@
 
             // for the following parameters of the OMX_AUDIO_PARAM_AACPROFILETYPE structure,
             // a value of -1 implies the parameter is not set by the application:
-            //   nMaxOutputChannels     uses default platform properties, see configureDownmix()
+            //   nMaxOutputChannels     -1 by default (no limit; set in initDecoder())
             //   nDrcCut                uses default platform properties, see initDecoder()
             //   nDrcBoost                idem
             //   nHeavyCompression        idem
@@ -425,18 +461,6 @@
     return mInputBufferCount > 0;
 }
 
-void SoftAAC2::configureDownmix() const {
-    char value[PROPERTY_VALUE_MAX];
-    if (!(property_get("media.aac_51_output_enabled", value, NULL)
-            && (!strcmp(value, "1") || !strcasecmp(value, "true")))) {
-        ALOGI("limiting to stereo output");
-        aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2);
-        // By default, the decoder creates a 5.1 channel downmix signal
-        // for seven and eight channel input streams. To enable 6.1 and 7.1 channel output
-        // use aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, -1)
-    }
-}
-
 bool SoftAAC2::outputDelayRingBufferPutSamples(INT_PCM *samples, int32_t numSamples) {
     if (numSamples == 0) {
         return true;
@@ -571,7 +595,6 @@
                 notifyEmptyBufferDone(inHeader);
                 inHeader = NULL;
 
-                configureDownmix();
                 // Only send out port settings changed event if both sample rate
                 // and numChannels are valid.
                 if (mStreamInfo->sampleRate && mStreamInfo->numChannels) {
@@ -770,31 +793,31 @@
                  * Thus, we could not say for sure whether a stream is
                  * AAC+/eAAC+ until the first data frame is decoded.
                  */
-                if (mInputBufferCount <= 2 || mOutputBufferCount > 1) { // TODO: <= 1
-                    if (mStreamInfo->sampleRate != prevSampleRate ||
-                        mStreamInfo->numChannels != prevNumChannels) {
-                        ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
-                              prevSampleRate, mStreamInfo->sampleRate,
-                              prevNumChannels, mStreamInfo->numChannels);
-
-                        notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
-                        mOutputPortSettingsChange = AWAITING_DISABLED;
-
-                        if (inHeader && inHeader->nFilledLen == 0) {
-                            inInfo->mOwnedByUs = false;
-                            mInputBufferCount++;
-                            inQueue.erase(inQueue.begin());
-                            mLastInHeader = NULL;
-                            inInfo = NULL;
-                            notifyEmptyBufferDone(inHeader);
-                            inHeader = NULL;
-                        }
+                if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
+                    if ((mInputBufferCount > 2) && (mOutputBufferCount <= 1)) {
+                        ALOGW("Invalid AAC stream");
+                        mSignalledError = true;
+                        notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
                         return;
                     }
-                } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
-                    ALOGW("Invalid AAC stream");
-                    mSignalledError = true;
-                    notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
+                } else if ((mStreamInfo->sampleRate != prevSampleRate) ||
+                           (mStreamInfo->numChannels != prevNumChannels)) {
+                    ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
+                          prevSampleRate, mStreamInfo->sampleRate,
+                          prevNumChannels, mStreamInfo->numChannels);
+
+                    notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+                    mOutputPortSettingsChange = AWAITING_DISABLED;
+
+                    if (inHeader && inHeader->nFilledLen == 0) {
+                        inInfo->mOwnedByUs = false;
+                        mInputBufferCount++;
+                        inQueue.erase(inQueue.begin());
+                        mLastInHeader = NULL;
+                        inInfo = NULL;
+                        notifyEmptyBufferDone(inHeader);
+                        inHeader = NULL;
+                    }
                     return;
                 }
                 if (inHeader && inHeader->nFilledLen == 0) {
@@ -829,10 +852,10 @@
             while (mOutputDelayCompensated > 0) {
                 // a buffer big enough for MAX_CHANNEL_COUNT channels of decoded HE-AAC
                 INT_PCM tmpOutBuffer[2048 * MAX_CHANNEL_COUNT];
- 
-                 // run DRC check
-                 mDrcWrap.submitStreamData(mStreamInfo);
-                 mDrcWrap.update();
+
+                // run DRC check
+                mDrcWrap.submitStreamData(mStreamInfo);
+                mDrcWrap.update();
 
                 AAC_DECODER_ERROR decoderErr =
                     aacDecoder_DecodeFrame(mAACDecoder,
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index c3e4459..a1cf285 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -74,7 +74,6 @@
     void initPorts();
     status_t initDecoder();
     bool isConfigured() const;
-    void configureDownmix() const;
     void drainDecoder();
 
 //      delay compensation
diff --git a/media/libstagefright/codecs/aacenc/AACEncoder.cpp b/media/libstagefright/codecs/aacenc/AACEncoder.cpp
index bebb9dc..9e596ff 100644
--- a/media/libstagefright/codecs/aacenc/AACEncoder.cpp
+++ b/media/libstagefright/codecs/aacenc/AACEncoder.cpp
@@ -30,7 +30,7 @@
 
 namespace android {
 
-AACEncoder::AACEncoder(const sp<MediaSource> &source, const sp<MetaData> &meta)
+AACEncoder::AACEncoder(const sp<IMediaSource> &source, const sp<MetaData> &meta)
     : mSource(source),
       mMeta(meta),
       mStarted(false),
diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk
index 58ec3ba..266f01b 100644
--- a/media/libstagefright/codecs/aacenc/Android.mk
+++ b/media/libstagefright/codecs/aacenc/Android.mk
@@ -1,6 +1,5 @@
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
-include frameworks/av/media/libstagefright/codecs/common/Config.mk
 
 AAC_LIBRARY = fraunhofer
 
@@ -35,24 +34,28 @@
 	src/transform.c \
 	src/memalign.c
 
-ifeq ($(VOTT), v5)
-LOCAL_SRC_FILES += \
-	src/asm/ARMV5E/AutoCorrelation_v5.s \
-	src/asm/ARMV5E/band_nrg_v5.s \
-	src/asm/ARMV5E/CalcWindowEnergy_v5.s \
-	src/asm/ARMV5E/PrePostMDCT_v5.s \
-	src/asm/ARMV5E/R4R8First_v5.s \
-	src/asm/ARMV5E/Radix4FFT_v5.s
-endif
+ifneq ($(ARCH_ARM_HAVE_NEON),true)
+    LOCAL_SRC_FILES_arm := \
+        src/asm/ARMV5E/AutoCorrelation_v5.s \
+        src/asm/ARMV5E/band_nrg_v5.s \
+        src/asm/ARMV5E/CalcWindowEnergy_v5.s \
+        src/asm/ARMV5E/PrePostMDCT_v5.s \
+        src/asm/ARMV5E/R4R8First_v5.s \
+        src/asm/ARMV5E/Radix4FFT_v5.s
 
-ifeq ($(VOTT), v7)
-LOCAL_SRC_FILES += \
-	src/asm/ARMV5E/AutoCorrelation_v5.s \
-	src/asm/ARMV5E/band_nrg_v5.s \
-	src/asm/ARMV5E/CalcWindowEnergy_v5.s \
-	src/asm/ARMV7/PrePostMDCT_v7.s \
-	src/asm/ARMV7/R4R8First_v7.s \
-	src/asm/ARMV7/Radix4FFT_v7.s
+    LOCAL_CFLAGS_arm := -DARMV5E -DARM_INASM -DARMV5_INASM
+    LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/src/asm/ARMV5E
+else
+    LOCAL_SRC_FILES_arm := \
+        src/asm/ARMV5E/AutoCorrelation_v5.s \
+        src/asm/ARMV5E/band_nrg_v5.s \
+        src/asm/ARMV5E/CalcWindowEnergy_v5.s \
+        src/asm/ARMV7/PrePostMDCT_v7.s \
+        src/asm/ARMV7/R4R8First_v7.s \
+        src/asm/ARMV7/Radix4FFT_v7.s
+    LOCAL_CFLAGS_arm := -DARMV5E -DARMV7Neon -DARM_INASM -DARMV5_INASM -DARMV6_INASM
+    LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/src/asm/ARMV5E
+    LOCAL_C_INCLUDES_arm += $(LOCAL_PATH)/src/asm/ARMV7
 endif
 
 LOCAL_MODULE := libstagefright_aacenc
@@ -71,18 +74,9 @@
 	$(LOCAL_PATH)/inc \
 	$(LOCAL_PATH)/basic_op
 
-ifeq ($(VOTT), v5)
-LOCAL_CFLAGS += -DARMV5E -DARM_INASM -DARMV5_INASM
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
-endif
-
-ifeq ($(VOTT), v7)
-LOCAL_CFLAGS += -DARMV5E -DARMV7Neon -DARM_INASM -DARMV5_INASM -DARMV6_INASM
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV7
-endif
-
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
 
@@ -109,6 +103,8 @@
   LOCAL_CFLAGS :=
 
   LOCAL_CFLAGS += -Werror
+  LOCAL_CLANG := true
+  LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
   LOCAL_STATIC_LIBRARIES := libFraunhoferAAC
 
@@ -133,6 +129,8 @@
   LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
 
   LOCAL_CFLAGS += -Werror
+  LOCAL_CLANG := true
+  LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
   LOCAL_STATIC_LIBRARIES := \
           libstagefright_aacenc
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
index e8dabed..63215ec 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
@@ -20,9 +20,11 @@
 
 #include "SoftAACEncoder2.h"
 #include <OMX_AudioExt.h>
+#include <OMX_IndexExt.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/hexdump.h>
+#include <utils/misc.h>
 
 namespace android {
 
@@ -35,6 +37,14 @@
     params->nVersion.s.nStep = 0;
 }
 
+static const OMX_U32 kSupportedProfiles[] = {
+    OMX_AUDIO_AACObjectLC,
+    OMX_AUDIO_AACObjectHE,
+    OMX_AUDIO_AACObjectHE_PS,
+    OMX_AUDIO_AACObjectLD,
+    OMX_AUDIO_AACObjectELD,
+};
+
 SoftAACEncoder2::SoftAACEncoder2(
         const char *name,
         const OMX_CALLBACKTYPE *callbacks,
@@ -117,7 +127,7 @@
 
 OMX_ERRORTYPE SoftAACEncoder2::internalGetParameter(
         OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
+    switch ((OMX_U32) index) {
         case OMX_IndexParamAudioPortFormat:
         {
             OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
@@ -232,6 +242,29 @@
             return OMX_ErrorNone;
         }
 
+        case OMX_IndexParamAudioProfileQuerySupported:
+        {
+            OMX_AUDIO_PARAM_ANDROID_PROFILETYPE *profileParams =
+                (OMX_AUDIO_PARAM_ANDROID_PROFILETYPE *)params;
+
+            if (!isValidOMXParam(profileParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (profileParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (profileParams->nProfileIndex >= NELEM(kSupportedProfiles)) {
+                return OMX_ErrorNoMore;
+            }
+
+            profileParams->eProfile =
+                kSupportedProfiles[profileParams->nProfileIndex];
+
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::internalGetParameter(index, params);
     }
diff --git a/media/libstagefright/codecs/amrnb/common/Android.mk b/media/libstagefright/codecs/amrnb/common/Android.mk
index 5e632a6..15220a4 100644
--- a/media/libstagefright/codecs/amrnb/common/Android.mk
+++ b/media/libstagefright/codecs/amrnb/common/Android.mk
@@ -7,7 +7,6 @@
  	src/bitno_tab.cpp \
  	src/bitreorder_tab.cpp \
  	src/bits2prm.cpp \
- 	src/bytesused.cpp \
  	src/c2_9pf_tab.cpp \
  	src/copy.cpp \
  	src/div_32.cpp \
@@ -38,7 +37,6 @@
  	src/mult_r.cpp \
  	src/norm_l.cpp \
  	src/norm_s.cpp \
- 	src/overflow_tbl.cpp \
  	src/ph_disp_tab.cpp \
  	src/pow2.cpp \
  	src/pow2_tbl.cpp \
@@ -70,6 +68,9 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_IMPORT_REF= -DOSCL_EXPORT_REF=
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+# addressing b/25409744
+#LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_MODULE := libstagefright_amrnb_common
 
diff --git a/media/libstagefright/codecs/amrnb/common/include/basic_op_c_equivalent.h b/media/libstagefright/codecs/amrnb/common/include/basic_op_c_equivalent.h
index c4e4d4f..8f0867a 100644
--- a/media/libstagefright/codecs/amrnb/common/include/basic_op_c_equivalent.h
+++ b/media/libstagefright/codecs/amrnb/common/include/basic_op_c_equivalent.h
@@ -115,6 +115,7 @@
      Returns:
         L_sum = 32-bit sum of L_var1 and L_var2 (Word32)
     */
+    __attribute__((no_sanitize("integer")))
     static inline Word32 L_add(Word32 L_var1, Word32 L_var2, Flag *pOverflow)
     {
         Word32 L_sum;
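
The attribute opts a single function out of clang's integer sanitizers; that matters here because these basic ops implement saturating DSP arithmetic whose intermediate wraparound is intentional, not a bug. A minimal illustration of the pattern (a generic saturating add, not the AMR L_add above):

```cpp
#include <cstdint>

// Deliberate wraparound, so exempt this function from UBSan's integer checks.
#ifdef __clang__
__attribute__((no_sanitize("integer")))
#endif
static int32_t saturating_add(int32_t a, int32_t b) {
    int32_t sum = (int32_t)((uint32_t)a + (uint32_t)b);  // may wrap; detected below
    if (((a ^ sum) & (b ^ sum)) < 0) {  // overflow iff both inputs disagree with sum's sign
        sum = (a < 0) ? INT32_MIN : INT32_MAX;
    }
    return sum;
}
```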
diff --git a/media/libstagefright/codecs/amrnb/common/include/bytesused.h b/media/libstagefright/codecs/amrnb/common/include/bytesused.h
deleted file mode 100644
index 934efbe..0000000
--- a/media/libstagefright/codecs/amrnb/common/include/bytesused.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/****************************************************************************************
-Portions of this file are derived from the following 3GPP standard:
-
-    3GPP TS 26.073
-    ANSI-C code for the Adaptive Multi-Rate (AMR) speech codec
-    Available from http://www.3gpp.org
-
-(C) 2004, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TTA, TTC)
-Permission to distribute, modify and use this file under the standard license
-terms listed above has been obtained from the copyright holder.
-****************************************************************************************/
-/*
-
- Pathname: .audio/gsm-amr/c/include/BytesUsed.h
-
-------------------------------------------------------------------------------
- REVISION HISTORY
-
- Description: Added #ifdef __cplusplus after Include section.
-
- Who:                       Date:
- Description:
-
-------------------------------------------------------------------------------
- INCLUDE DESCRIPTION
-
- This file declares a table BytesUsed.
-
-------------------------------------------------------------------------------
-*/
-
-/*----------------------------------------------------------------------------
-; CONTINUE ONLY IF NOT ALREADY DEFINED
-----------------------------------------------------------------------------*/
-#ifndef BYTESUSED_H
-#define BYTESUSED_H
-
-/*----------------------------------------------------------------------------
-; INCLUDES
-----------------------------------------------------------------------------*/
-
-/*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-    /*----------------------------------------------------------------------------
-    ; MACROS
-    ; Define module specific macros here
-    ----------------------------------------------------------------------------*/
-
-    /*----------------------------------------------------------------------------
-    ; DEFINES
-    ; Include all pre-processor statements here.
-    ----------------------------------------------------------------------------*/
-
-    /*----------------------------------------------------------------------------
-    ; EXTERNAL VARIABLES REFERENCES
-    ; Declare variables used in this module but defined elsewhere
-    ----------------------------------------------------------------------------*/
-    extern const short BytesUsed[];
-
-    /*----------------------------------------------------------------------------
-    ; SIMPLE TYPEDEF'S
-    ----------------------------------------------------------------------------*/
-
-    /*----------------------------------------------------------------------------
-    ; ENUMERATED TYPEDEF'S
-    ----------------------------------------------------------------------------*/
-
-    /*----------------------------------------------------------------------------
-    ; STRUCTURES TYPEDEF'S
-    ----------------------------------------------------------------------------*/
-
-
-    /*----------------------------------------------------------------------------
-    ; GLOBAL FUNCTION DEFINITIONS
-    ; Function Prototype declaration
-    ----------------------------------------------------------------------------*/
-
-
-    /*----------------------------------------------------------------------------
-    ; END
-    ----------------------------------------------------------------------------*/
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-
diff --git a/media/libstagefright/codecs/amrnb/common/src/az_lsp.cpp b/media/libstagefright/codecs/amrnb/common/src/az_lsp.cpp
index 976b1a6..459c3c3 100644
--- a/media/libstagefright/codecs/amrnb/common/src/az_lsp.cpp
+++ b/media/libstagefright/codecs/amrnb/common/src/az_lsp.cpp
@@ -237,7 +237,9 @@
 
 ------------------------------------------------------------------------------
 */
-
+#ifdef __clang__
+__attribute__((no_sanitize("integer")))
+#endif
 static Word16 Chebps(Word16 x,
                      Word16 f[], /* (n) */
                      Word16 n,
diff --git a/media/libstagefright/codecs/amrnb/common/src/bytesused.cpp b/media/libstagefright/codecs/amrnb/common/src/bytesused.cpp
deleted file mode 100644
index b61bac4..0000000
--- a/media/libstagefright/codecs/amrnb/common/src/bytesused.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/****************************************************************************************
-Portions of this file are derived from the following 3GPP standard:
-
-    3GPP TS 26.073
-    ANSI-C code for the Adaptive Multi-Rate (AMR) speech codec
-    Available from http://www.3gpp.org
-
-(C) 2004, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TTA, TTC)
-Permission to distribute, modify and use this file under the standard license
-terms listed above has been obtained from the copyright holder.
-****************************************************************************************/
-/*
-
- Pathname: ./audio/gsm-amr/c/src/BytesUsed.c
-
-------------------------------------------------------------------------------
- REVISION HISTORY
-
- Description: Corrected entries for all SID frames and updated function
-              description. Updated copyright year.
-
- Description: Added #ifdef __cplusplus and removed "extern" from table
-              definition. Removed corresponding header file from Include
-              section.
-
- Description: Put "extern" back.
-
- Who:                       Date:
- Description:
-
-------------------------------------------------------------------------------
- INPUT AND OUTPUT DEFINITIONS
-
- Inputs:
-    None
-
- Local Stores/Buffers/Pointers Needed:
-    None
-
- Global Stores/Buffers/Pointers Needed:
-    None
-
- Outputs:
-    None
-
- Pointers and Buffers Modified:
-    None
-
- Local Stores Modified:
-    None
-
- Global Stores Modified:
-    None
-
-------------------------------------------------------------------------------
- FUNCTION DESCRIPTION
-
- This function creates a table called BytesUsed that holds the value that
- describes the number of bytes required to hold one frame worth of data in
- the WMF (non-IF2) frame format. Each table entry is the sum of the frame
- type byte and the number of bytes used up by the core speech data for each
- 3GPP frame type.
-
-------------------------------------------------------------------------------
- REQUIREMENTS
-
- None
-
-------------------------------------------------------------------------------
- REFERENCES
-
- [1] "AMR Speech Codec Frame Structure", 3GPP TS 26.101 version 4.1.0
-     Release 4, June 2001, page 13.
-
-------------------------------------------------------------------------------
- PSEUDO-CODE
-
-
-------------------------------------------------------------------------------
- RESOURCES USED
-   When the code is written for a specific target processor the
-     the resources used should be documented below.
-
- STACK USAGE: [stack count for this module] + [variable to represent
-          stack usage for each subroutine called]
-
-     where: [stack usage variable] = stack usage for [subroutine
-         name] (see [filename].ext)
-
- DATA MEMORY USED: x words
-
- PROGRAM MEMORY USED: x words
-
- CLOCK CYCLES: [cycle count equation for this module] + [variable
-           used to represent cycle count for each subroutine
-           called]
-
-     where: [cycle count variable] = cycle count for [subroutine
-        name] (see [filename].ext)
-
-------------------------------------------------------------------------------
-*/
-
-
-/*----------------------------------------------------------------------------
-; INCLUDES
-----------------------------------------------------------------------------*/
-#include "typedef.h"
-
-/*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-    /*----------------------------------------------------------------------------
-    ; MACROS
-    ; Define module specific macros here
-    ----------------------------------------------------------------------------*/
-
-
-    /*----------------------------------------------------------------------------
-    ; DEFINES
-    ; Include all pre-processor statements here. Include conditional
-    ; compile variables also.
-    ----------------------------------------------------------------------------*/
-
-    /*----------------------------------------------------------------------------
-    ; LOCAL FUNCTION DEFINITIONS
-    ; Function Prototype declaration
-    ----------------------------------------------------------------------------*/
-
-
-    /*----------------------------------------------------------------------------
-    ; LOCAL STORE/BUFFER/POINTER DEFINITIONS
-    ; Variable declaration - defined here and used outside this module
-    ----------------------------------------------------------------------------*/
-    const short BytesUsed[16] =
-    {
-        13, /* 4.75 */
-        14, /* 5.15 */
-        16, /* 5.90 */
-        18, /* 6.70 */
-        20, /* 7.40 */
-        21, /* 7.95 */
-        27, /* 10.2 */
-        32, /* 12.2 */
-        6, /* GsmAmr comfort noise */
-        7, /* Gsm-Efr comfort noise */
-        6, /* IS-641 comfort noise */
-        6, /* Pdc-Efr comfort noise */
-        0, /* future use */
-        0, /* future use */
-        0, /* future use */
-        1 /* No transmission */
-    };
-    /*----------------------------------------------------------------------------
-    ; EXTERNAL FUNCTION REFERENCES
-    ; Declare functions defined elsewhere and referenced in this module
-    ----------------------------------------------------------------------------*/
-
-
-    /*----------------------------------------------------------------------------
-    ; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES
-    ; Declare variables used in this module but defined elsewhere
-    ----------------------------------------------------------------------------*/
-
-
-    /*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-}
-#endif
-
-/*----------------------------------------------------------------------------
-; FUNCTION CODE
-----------------------------------------------------------------------------*/
-
-/*----------------------------------------------------------------------------
-; Define all local variables
-----------------------------------------------------------------------------*/
-
-
-/*----------------------------------------------------------------------------
-; Function body here
-----------------------------------------------------------------------------*/
-
-
-/*----------------------------------------------------------------------------
-; Return nothing or data or data pointer
-----------------------------------------------------------------------------*/
-
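
For context, the table deleted above maps each 3GPP frame type (0..15) to its
WMF frame size: one frame-type byte plus the packed core speech bits, e.g.
32 bytes for the 12.2 kbps mode. A minimal sketch of the lookup a WMF
packetizer performs against such a table (the helper name is hypothetical; the
real callers live in the AMR framing code):

    #include <stddef.h>

    extern const short BytesUsed[16];

    /* WMF bytes needed for one frame of the given 3GPP frame type;
     * the reserved "future use" entries are stored as 0. */
    static size_t wmf_frame_bytes(unsigned frame_type)
    {
        return (frame_type < 16) ? (size_t)BytesUsed[frame_type] : 0;
    }
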
diff --git a/media/libstagefright/codecs/amrnb/common/src/overflow_tbl.cpp b/media/libstagefright/codecs/amrnb/common/src/overflow_tbl.cpp
deleted file mode 100644
index c4a016d..0000000
--- a/media/libstagefright/codecs/amrnb/common/src/overflow_tbl.cpp
+++ /dev/null
@@ -1,174 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/****************************************************************************************
-Portions of this file are derived from the following 3GPP standard:
-
-    3GPP TS 26.073
-    ANSI-C code for the Adaptive Multi-Rate (AMR) speech codec
-    Available from http://www.3gpp.org
-
-(C) 2004, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TTA, TTC)
-Permission to distribute, modify and use this file under the standard license
-terms listed above has been obtained from the copyright holder.
-****************************************************************************************/
-/*
-
- Filename: /audio/gsm_amr/c/src/overflow_tbl.c
-
-------------------------------------------------------------------------------
- REVISION HISTORY
-
- Description: Added #ifdef __cplusplus and removed "extern" from table
-              definition.
-
- Description: Put "extern" back.
-
- Who:                       Date:
- Description:
-
-------------------------------------------------------------------------------
- MODULE DESCRIPTION
-
- This file contains the definition of overflow_tbl[], used by the l_shl()
- and l_shr() functions.
-
-------------------------------------------------------------------------------
-*/
-
-/*----------------------------------------------------------------------------
-; INCLUDES
-----------------------------------------------------------------------------*/
-#include "typedef.h"
-
-/*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-    /*----------------------------------------------------------------------------
-    ; MACROS
-    ; [Define module specific macros here]
-    ----------------------------------------------------------------------------*/
-
-    /*----------------------------------------------------------------------------
-    ; DEFINES
-    ; [Include all pre-processor statements here. Include conditional
-    ; compile variables also.]
-    ----------------------------------------------------------------------------*/
-
-    /*----------------------------------------------------------------------------
-    ; LOCAL FUNCTION DEFINITIONS
-    ; [List function prototypes here]
-    ----------------------------------------------------------------------------*/
-
-    /*----------------------------------------------------------------------------
-    ; LOCAL VARIABLE DEFINITIONS
-    ; [Variable declaration - defined here and used outside this module]
-    ----------------------------------------------------------------------------*/
-    const Word32 overflow_tbl [32]   = {0x7fffffffL, 0x3fffffffL,
-        0x1fffffffL, 0x0fffffffL,
-        0x07ffffffL, 0x03ffffffL,
-        0x01ffffffL, 0x00ffffffL,
-        0x007fffffL, 0x003fffffL,
-        0x001fffffL, 0x000fffffL,
-        0x0007ffffL, 0x0003ffffL,
-        0x0001ffffL, 0x0000ffffL,
-        0x00007fffL, 0x00003fffL,
-        0x00001fffL, 0x00000fffL,
-        0x000007ffL, 0x000003ffL,
-        0x000001ffL, 0x000000ffL,
-        0x0000007fL, 0x0000003fL,
-        0x0000001fL, 0x0000000fL,
-        0x00000007L, 0x00000003L,
-        0x00000001L, 0x00000000L
-    };
-
-    /*--------------------------------------------------------------------------*/
-#ifdef __cplusplus
-}
-#endif
-
-/*
-------------------------------------------------------------------------------
- FUNCTION NAME:
-------------------------------------------------------------------------------
- INPUT AND OUTPUT DEFINITIONS
-
- Inputs:
-    None
-
- Outputs:
-    None
-
- Returns:
-    None
-
- Global Variables Used:
-    None
-
- Local Variables Needed:
-    None
-
-------------------------------------------------------------------------------
- FUNCTION DESCRIPTION
-
- None
-
-------------------------------------------------------------------------------
- REQUIREMENTS
-
- None
-
-------------------------------------------------------------------------------
- REFERENCES
-
- [1] l_shl() function in basic_op2.c,  UMTS GSM AMR speech codec, R99 -
- Version 3.2.0, March 2, 2001
-
-------------------------------------------------------------------------------
- PSEUDO-CODE
-
-
-------------------------------------------------------------------------------
- RESOURCES USED [optional]
-
- When the code is written for a specific target processor, the
- resources used should be documented below.
-
- HEAP MEMORY USED: x bytes
-
- STACK MEMORY USED: x bytes
-
- CLOCK CYCLES: (cycle count equation for this function) + (variable
-                used to represent cycle count for each subroutine
-                called)
-     where: (cycle count variable) = cycle count for [subroutine
-                                     name]
-
-------------------------------------------------------------------------------
- CAUTION [optional]
- [State any special notes, constraints or cautions for users of this function]
-
-------------------------------------------------------------------------------
-*/
-
-/*----------------------------------------------------------------------------
-; FUNCTION CODE
-----------------------------------------------------------------------------*/
-
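
Each entry of the table deleted above is MAX_32 >> n: the largest magnitude
that survives a left shift by n bits without overflowing. A sketch of how a
saturating l_shl() consults such a limit (simplified; the real basic op also
maintains the overflow flag):

    #include <stdint.h>

    /* Saturating left shift, assuming 0 <= n < 32. overflow_tbl[n]
     * stored exactly the INT32_MAX >> n limit computed here. */
    static int32_t l_shl_sketch(int32_t v, int n)
    {
        const int32_t limit = INT32_MAX >> n;
        if (v > limit)  return INT32_MAX;              /* saturate positive */
        if (v < ~limit) return INT32_MIN;              /* ~limit == INT32_MIN >> n */
        return (int32_t)((int64_t)v * ((int64_t)1 << n)); /* in range, safe */
    }
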
diff --git a/media/libstagefright/codecs/amrnb/common/src/sub.cpp b/media/libstagefright/codecs/amrnb/common/src/sub.cpp
index d936128..b956912 100644
--- a/media/libstagefright/codecs/amrnb/common/src/sub.cpp
+++ b/media/libstagefright/codecs/amrnb/common/src/sub.cpp
@@ -187,6 +187,9 @@
 ; FUNCTION CODE
 ----------------------------------------------------------------------------*/
 
+#ifdef __clang__
+__attribute__((no_sanitize("integer")))
+#endif
 Word16 sub(Word16 var1, Word16 var2, Flag *pOverflow)
 {
 
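
The attribute added here (and again for Syn_filt below) opts an individual
basic op out of clang's "integer" sanitizer group, since these fixed-point
helpers deliberately rely on wraparound or out-of-range intermediates as part
of their contract. The same pattern on a generic helper, as a sketch:

    #include <stdint.h>

    /* Intentional two's-complement wraparound: the sanitizer would flag
     * the unsigned overflow, so this one function is opted out when
     * building with clang while the rest of the file stays instrumented. */
    #ifdef __clang__
    __attribute__((no_sanitize("integer")))
    #endif
    static int32_t add32_wrap(int32_t a, int32_t b)
    {
        return (int32_t)((uint32_t)a + (uint32_t)b);
    }
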
diff --git a/media/libstagefright/codecs/amrnb/common/src/syn_filt.cpp b/media/libstagefright/codecs/amrnb/common/src/syn_filt.cpp
index bcdc696..36c1d84 100644
--- a/media/libstagefright/codecs/amrnb/common/src/syn_filt.cpp
+++ b/media/libstagefright/codecs/amrnb/common/src/syn_filt.cpp
@@ -245,7 +245,9 @@
 
 ------------------------------------------------------------------------------
 */
-
+#ifdef __clang__
+__attribute__((no_sanitize("integer")))
+#endif
 void Syn_filt(
     Word16 a[],     /* (i)   : a[M+1] prediction coefficients   (M=10)  */
     Word16 x[],     /* (i)   : input signal                             */
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.mk b/media/libstagefright/codecs/amrnb/dec/Android.mk
index 76a7f40..7967ec3 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.mk
+++ b/media/libstagefright/codecs/amrnb/dec/Android.mk
@@ -48,6 +48,8 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_IMPORT_REF=
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+#LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_amrnbdec
 
@@ -71,6 +73,8 @@
 LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+#LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_amrnbdec libstagefright_amrwbdec
@@ -100,6 +104,9 @@
 LOCAL_SHARED_LIBRARIES := \
         libstagefright_amrnb_common libaudioutils liblog
 
+LOCAL_CLANG := true
+#LOCAL_SANITIZE := signed-integer-overflow
+
 LOCAL_MODULE := libstagefright_amrnbdec_test
 LOCAL_MODULE_TAGS := optional
 
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.mk b/media/libstagefright/codecs/amrnb/enc/Android.mk
index bdba8a9..f8a41af 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.mk
+++ b/media/libstagefright/codecs/amrnb/enc/Android.mk
@@ -70,6 +70,9 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)"
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+#addressing b/25409744
+#LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_amrnbenc
 
@@ -91,6 +94,9 @@
         $(LOCAL_PATH)/../common
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+#addressing b/25409744
+#LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_amrnbenc
@@ -103,3 +109,29 @@
 LOCAL_MODULE_TAGS := optional
 
 include $(BUILD_SHARED_LIBRARY)
+
+################################################################################
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := \
+    test/amrnb_enc_test.cpp
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/src \
+    $(LOCAL_PATH)/../common/include
+
+LOCAL_STATIC_LIBRARIES := \
+    libstagefright_amrnbenc
+
+LOCAL_SHARED_LIBRARIES := \
+    libstagefright_amrnb_common
+
+LOCAL_CLANG := true
+#addressing b/25409744
+#LOCAL_SANITIZE := signed-integer-overflow
+
+LOCAL_MODULE := libstagefright_amrnbenc_test
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/codecs/amrnb/enc/src/l_negate.cpp b/media/libstagefright/codecs/amrnb/enc/src/l_negate.cpp
index 588abbb..523e482 100644
--- a/media/libstagefright/codecs/amrnb/enc/src/l_negate.cpp
+++ b/media/libstagefright/codecs/amrnb/enc/src/l_negate.cpp
@@ -147,7 +147,7 @@
 /*----------------------------------------------------------------------------
 ; FUNCTION CODE
 ----------------------------------------------------------------------------*/
-Word32 L_negate(register Word32 L_var1)
+Word32 L_negate(Word32 L_var1)
 {
     /*----------------------------------------------------------------------------
     ; Define all local variables
diff --git a/media/libstagefright/codecs/amrnb/enc/test/amrnb_enc_test.cpp b/media/libstagefright/codecs/amrnb/enc/test/amrnb_enc_test.cpp
new file mode 100644
index 0000000..e2d198e
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/enc/test/amrnb_enc_test.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <stdint.h>
+#include <assert.h>
+#include "gsmamr_enc.h"
+
+enum {
+    kInputSize = 320, // 160 samples x 16 bits per sample.
+    kOutputSize = 1024
+};
+
+struct AmrNbEncState {
+    void *encCtx;
+    void *pidSyncCtx;
+};
+
+void usage(void) {
+    printf("Usage:\n");
+    printf("AMRNBEnc [options] <input file> <output file>\n");
+    printf("\n");
+    printf("Options +M* for setting compression bitrate mode, default is 4.75 kbps\n");
+    printf(" +M0 = 4.75 kbps\n");
+    printf(" +M1 = 5.15 kbps\n");
+    printf(" +M2 = 5.90 kbps\n");
+    printf(" +M3 = 6.70 kbps\n");
+    printf(" +M4 = 7.40 kbps\n");
+    printf(" +M5 = 7.95 kbps\n");
+    printf(" +M6 = 10.2 kbps\n");
+    printf(" +M7 = 12.2 kbps\n");
+    printf("\n");
+}
+
+int encode(int mode, const char *srcFile, const char *dstFile) {
+    int           retVal     = EXIT_SUCCESS;
+    FILE          *fSrc      = NULL;
+    FILE          *fDst      = NULL;
+    int           frameNum   = 0;
+    bool          eofReached = false;
+    uint16_t      *inputBuf  = NULL;
+    uint8_t       *outputBuf = NULL;
+    AmrNbEncState *amr       = NULL;
+
+    clock_t   start, finish;
+    double    duration = 0.0;
+
+    // Open input file.
+    fSrc = fopen(srcFile, "rb");
+    if (fSrc == NULL) {
+        fprintf(stderr, "Error opening input file\n");
+        retVal = EXIT_FAILURE;
+        goto safe_exit;
+    }
+
+    // Open output file.
+    fDst = fopen(dstFile, "wb");
+    if (fDst == NULL) {
+        fprintf(stderr, "Error opening output file\n");
+        retVal = EXIT_FAILURE;
+        goto safe_exit;
+    }
+
+    // Allocate input buffer.
+    inputBuf = (uint16_t*) malloc(kInputSize);
+    assert(inputBuf != NULL);
+
+    // Allocate output buffer.
+    outputBuf = (uint8_t*) malloc(kOutputSize);
+    assert(outputBuf != NULL);
+
+    // Initialize encoder.
+    amr = (AmrNbEncState*) malloc(sizeof(AmrNbEncState));
+    assert(amr != NULL);
+    AMREncodeInit(&amr->encCtx, &amr->pidSyncCtx, 0);
+
+    // Write file header.
+    fwrite("#!AMR\n", 1, 6, fDst);
+
+    while (1) {
+        // Read next input frame.
+        int bytesRead;
+        bytesRead = fread(inputBuf, 1, kInputSize, fSrc);
+        if (bytesRead != kInputSize && !feof(fSrc)) {
+            retVal = EXIT_FAILURE; // Short read that was not EOF.
+            fprintf(stderr, "Error reading input file\n");
+            goto safe_exit;
+        } else if (feof(fSrc) && bytesRead == 0) {
+            eofReached = true;
+            break;
+        }
+
+        start = clock();
+
+        // Encode the frame.
+        Frame_Type_3GPP frame_type = (Frame_Type_3GPP) mode;
+        int bytesGenerated;
+        bytesGenerated = AMREncode(amr->encCtx, amr->pidSyncCtx, (Mode)mode,
+                                   (Word16*)inputBuf, outputBuf, &frame_type,
+                                   AMR_TX_WMF);
+
+        // Convert from WMF to RFC 3267 format.
+        if (bytesGenerated > 0) {
+            outputBuf[0] = ((outputBuf[0] << 3) | 4) & 0x7c;
+        }
+
+        finish = clock();
+        duration += finish - start;
+
+        if (bytesGenerated < 0) {
+            retVal = EXIT_FAILURE;
+            fprintf(stderr, "Encoding error\n");
+            goto safe_exit;
+        }
+
+        frameNum++;
+        printf(" Frames processed: %d\n", frameNum);
+
+        // Write the output.
+        fwrite(outputBuf, 1, bytesGenerated, fDst);
+    }
+
+    // Dump the time taken by encode.
+    printf("\n%2.5lf seconds\n", (double)duration/CLOCKS_PER_SEC);
+
+safe_exit:
+
+    // Free the encoder instance.
+    if (amr) {
+        AMREncodeExit(&amr->encCtx, &amr->pidSyncCtx);
+        free(amr);
+    }
+
+    // Free input and output buffer.
+    free(inputBuf);
+    free(outputBuf);
+
+    // Close the input and output files.
+    if (fSrc) {
+        fclose(fSrc);
+    }
+    if (fDst) {
+        fclose(fDst);
+    }
+
+    return retVal;
+}
+
+int main(int argc, char *argv[]) {
+    Mode  mode = MR475;
+    int   retVal;
+    char  *inFileName = NULL;
+    char  *outFileName = NULL;
+    int   arg, filename = 0;
+
+    if (argc < 3) {
+        usage();
+        return EXIT_FAILURE;
+    } else {
+        for (arg = 1; arg < argc; arg++) {
+            if (argv[arg][0] == '+') {
+                if (argv[arg][1] == 'M') {
+                    switch (argv[arg][2]) {
+                    case '0': mode = MR475;
+                        break;
+                    case '1': mode = MR515;
+                        break;
+                    case '2': mode = MR59;
+                        break;
+                    case '3': mode = MR67;
+                        break;
+                    case '4': mode = MR74;
+                        break;
+                    case '5': mode = MR795;
+                        break;
+                    case '6': mode = MR102;
+                        break;
+                    case '7': mode = MR122;
+                        break;
+                    default:
+                        usage();
+                        fprintf(stderr, "Invalid parameter '%s'.\n", argv[arg]);
+                        return EXIT_FAILURE;
+                        break;
+                    }
+                } else {
+                    usage();
+                    fprintf(stderr, "Invalid parameter '%s'.\n", argv[arg]);
+                    return EXIT_FAILURE;
+                }
+            } else {
+                switch (filename) {
+                case 0:
+                    inFileName  = argv[arg];
+                    break;
+                case 1:
+                    outFileName = argv[arg];
+                    break;
+                default:
+                    usage();
+                    fprintf(stderr, "Invalid parameter '%s'.\n", argv[arg]);
+                    return EXIT_FAILURE;
+                }
+                filename++;
+            }
+        }
+    }
+
+    retVal = encode(mode, inFileName, outFileName);
+    return retVal;
+}
+
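
The ((outputBuf[0] << 3) | 4) & 0x7c line in the test above repacks the WMF
frame header into the storage-format ToC octet (RFC 3267 layout: padding bit,
frame type in bits 6..3, quality bit in bit 2). Spelled out, assuming that
octet layout:

    #include <stdint.h>

    /* WMF puts the 3GPP frame type in the low bits of the first byte; the
     * "#!AMR\n" storage format wants FT in bits 6..3 with Q=1 (bit 2) and
     * the padding bits cleared. */
    static uint8_t wmf_to_storage_toc(uint8_t wmf_byte)
    {
        return (uint8_t)(((wmf_byte << 3) | 0x04) & 0x7c);
    }

With the makefile change above, the result builds as the module
libstagefright_amrnbenc_test and is driven as shown in usage(), e.g.
"libstagefright_amrnbenc_test +M7 in.pcm out.amr" (file names illustrative).
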
diff --git a/media/libstagefright/codecs/amrwb/Android.mk b/media/libstagefright/codecs/amrwb/Android.mk
index 686f7a3..1649c4a 100644
--- a/media/libstagefright/codecs/amrwb/Android.mk
+++ b/media/libstagefright/codecs/amrwb/Android.mk
@@ -51,7 +51,33 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_IMPORT_REF=
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_amrwbdec
 
 include $(BUILD_STATIC_LIBRARY)
+
+################################################################################
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := \
+        test/amrwbdec_test.cpp
+
+LOCAL_C_INCLUDES := \
+        $(LOCAL_PATH)/src \
+        $(LOCAL_PATH)/include \
+        $(call include-path-for, audio-utils)
+
+LOCAL_STATIC_LIBRARIES := \
+        libstagefright_amrwbdec libsndfile
+
+LOCAL_SHARED_LIBRARIES := \
+        libaudioutils
+
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
+
+LOCAL_MODULE := libstagefright_amrwbdec_test
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/codecs/amrwb/src/pvamrwbdecoder_basic_op_cequivalent.h b/media/libstagefright/codecs/amrwb/src/pvamrwbdecoder_basic_op_cequivalent.h
index 7fd680d..3c7590c 100644
--- a/media/libstagefright/codecs/amrwb/src/pvamrwbdecoder_basic_op_cequivalent.h
+++ b/media/libstagefright/codecs/amrwb/src/pvamrwbdecoder_basic_op_cequivalent.h
@@ -206,16 +206,18 @@
     {
         int32 L_var_out;
 
-        L_var_out = L_var1 + L_var2;
-
-        if (((L_var1 ^ L_var2) & MIN_32) == 0)  /* same sign ? */
-        {
-            if ((L_var_out ^ L_var1) & MIN_32)  /* addition matches sign ? */
-            {
-                L_var_out = (L_var1 >> 31) ^ MAX_32;
+        //L_var_out = L_var1 + L_var2;
+        if (L_var2 < 0) {
+            if (L_var1 < MIN_32 - L_var2) {
+                return MIN_32;
+            }
+        } else {
+            if (L_var1 > MAX_32 - L_var2) {
+                return MAX_32;
             }
         }
-        return (L_var_out);
+
+        return L_var1 + L_var2;
     }
 
 
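
The rewrite above replaces the old detect-after-the-fact sign test, which
performed a possibly overflowing signed add first (undefined behaviour in
C/C++), with range checks done before the operation. The pattern in isolation:

    #include <stdint.h>

    /* Saturating 32-bit add without signed overflow: test against the
     * representable range first, then the add itself is provably safe. */
    static int32_t sat_add32(int32_t a, int32_t b)
    {
        if (b < 0) {
            if (a < INT32_MIN - b) return INT32_MIN;   /* would underflow */
        } else {
            if (a > INT32_MAX - b) return INT32_MAX;   /* would overflow  */
        }
        return a + b;
    }

sub_int32 in the next hunk gets the mirror-image treatment, and
mac_16by16_to_int32/msu_16by16_from_int32 further down are re-expressed as
compositions of these checked primitives.
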
@@ -248,142 +250,24 @@
 
     __inline  int32 sub_int32(int32 L_var1, int32 L_var2)
     {
-        int32 L_var_out;
-
-        L_var_out = L_var1 - L_var2;
-
-        if (((L_var1 ^ L_var2) & MIN_32) != 0)  /* different sign ? */
-        {
-            if ((L_var_out ^ L_var1) & MIN_32)  /* difference matches sign ? */
-            {
-                L_var_out = (L_var1 >> 31) ^ MAX_32;
+        //L_var_out = L_var1 - L_var2;
+        if (L_var2 < 0) {
+            if (L_var1 > MAX_32 + L_var2) {
+                return MAX_32;
             }
-        }
-        return (L_var_out);
-    }
-
-
-
-    /*----------------------------------------------------------------------------
-
-         Function Name : mac_16by16_to_int32
-
-         Multiply var1 by var2 and shift the result left by 1. Add the 32 bit
-         result to L_var3 with saturation, return a 32 bit result:
-              L_mac(L_var3,var1,var2) = L_add(L_var3,L_mult(var1,var2)).
-
-         Inputs :
-
-          L_var3   32 bit long signed integer (int32) whose value falls in the
-                   range : 0x8000 0000 <= L_var3 <= 0x7fff ffff.
-
-          var1
-                   16 bit short signed integer (int16) whose value falls in the
-                   range : 0xffff 8000 <= var1 <= 0x0000 7fff.
-
-          var2
-                   16 bit short signed integer (int16) whose value falls in the
-                   range : 0xffff 8000 <= var1 <= 0x0000 7fff.
-
-
-         Return Value :
-                   32 bit long signed integer (int32) whose value falls in the
-                   range : 0x8000 0000 <= L_var_out <= 0x7fff ffff.
-
-     ----------------------------------------------------------------------------*/
-
-
-    __inline  int32 mac_16by16_to_int32(int32 L_var3, int16 var1, int16 var2)
-    {
-        int32 L_var_out;
-        int32 L_mul;
-
-        L_mul  = ((int32) var1 * (int32) var2);
-
-        if (L_mul != 0x40000000)
-        {
-            L_mul <<= 1;
-        }
-        else
-        {
-            L_mul = MAX_32;     /* saturation */
-        }
-
-        L_var_out = L_var3 + L_mul;
-
-        if (((L_mul ^ L_var3) & MIN_32) == 0)  /* same sign ? */
-        {
-            if ((L_var_out ^ L_var3) & MIN_32)  /* addition matches sign ? */
-            {
-                L_var_out = (L_var3 >> 31) ^ MAX_32;
+        } else {
+            if (L_var1 < MIN_32 + L_var2) {
+                return MIN_32;
             }
         }
 
-        return (L_var_out);
+        return L_var1 - L_var2;
     }
 
 
 
     /*----------------------------------------------------------------------------
 
-         Function Name : msu_16by16_from_int32
-
-         Multiply var1 by var2 and shift the result left by 1. Subtract the 32 bit
-         result to L_var3 with saturation, return a 32 bit result:
-              L_msu(L_var3,var1,var2) = L_sub(L_var3,L_mult(var1,var2)).
-
-         Inputs :
-
-          L_var3   32 bit long signed integer (int32) whose value falls in the
-                   range : 0x8000 0000 <= L_var3 <= 0x7fff ffff.
-
-          var1
-                   16 bit short signed integer (int16) whose value falls in the
-                   range : 0xffff 8000 <= var1 <= 0x0000 7fff.
-
-          var2
-                   16 bit short signed integer (int16) whose value falls in the
-                   range : 0xffff 8000 <= var1 <= 0x0000 7fff.
-
-
-         Return Value :
-                   32 bit long signed integer (int32) whose value falls in the
-                   range : 0x8000 0000 <= L_var_out <= 0x7fff ffff.
-
-     ----------------------------------------------------------------------------*/
-
-    __inline  int32 msu_16by16_from_int32(int32 L_var3, int16 var1, int16 var2)
-    {
-        int32 L_var_out;
-        int32 L_mul;
-
-        L_mul  = ((int32) var1 * (int32) var2);
-
-        if (L_mul != 0x40000000)
-        {
-            L_mul <<= 1;
-        }
-        else
-        {
-            L_mul = MAX_32;     /* saturation */
-        }
-
-        L_var_out = L_var3 - L_mul;
-
-        if (((L_mul ^ L_var3) & MIN_32) != 0)  /* different sign ? */
-        {
-            if ((L_var_out ^ L_var3) & MIN_32)  /* difference matches sign ? */
-            {
-                L_var_out = (L_var3 >> 31) ^ MAX_32;
-            }
-        }
-
-        return (L_var_out);
-    }
-
-
-    /*----------------------------------------------------------------------------
-
          Function Name : mul_16by16_to_int32
 
          mul_16by16_to_int32 is the 32 bit result of the multiplication of var1
@@ -428,6 +312,75 @@
 
     /*----------------------------------------------------------------------------
 
+         Function Name : mac_16by16_to_int32
+
+         Multiply var1 by var2 and shift the result left by 1. Add the 32 bit
+         result to L_var3 with saturation, return a 32 bit result:
+              L_mac(L_var3,var1,var2) = L_add(L_var3,L_mult(var1,var2)).
+
+         Inputs :
+
+          L_var3   32 bit long signed integer (int32) whose value falls in the
+                   range : 0x8000 0000 <= L_var3 <= 0x7fff ffff.
+
+          var1
+                   16 bit short signed integer (int16) whose value falls in the
+                   range : 0xffff 8000 <= var1 <= 0x0000 7fff.
+
+          var2
+                   16 bit short signed integer (int16) whose value falls in the
+                   range : 0xffff 8000 <= var1 <= 0x0000 7fff.
+
+
+         Return Value :
+                   32 bit long signed integer (int32) whose value falls in the
+                   range : 0x8000 0000 <= L_var_out <= 0x7fff ffff.
+
+     ----------------------------------------------------------------------------*/
+
+
+    __inline  int32 mac_16by16_to_int32(int32 L_var3, int16 var1, int16 var2)
+    {
+        return add_int32(L_var3, mul_16by16_to_int32(var1, var2));
+    }
+
+
+    /*----------------------------------------------------------------------------
+
+         Function Name : msu_16by16_from_int32
+
+         Multiply var1 by var2 and shift the result left by 1. Subtract the 32 bit
+         result to L_var3 with saturation, return a 32 bit result:
+              L_msu(L_var3,var1,var2) = L_sub(L_var3,L_mult(var1,var2)).
+
+         Inputs :
+
+          L_var3   32 bit long signed integer (int32) whose value falls in the
+                   range : 0x8000 0000 <= L_var3 <= 0x7fff ffff.
+
+          var1
+                   16 bit short signed integer (int16) whose value falls in the
+                   range : 0xffff 8000 <= var1 <= 0x0000 7fff.
+
+          var2
+                   16 bit short signed integer (int16) whose value falls in the
+                   range : 0xffff 8000 <= var1 <= 0x0000 7fff.
+
+
+         Return Value :
+                   32 bit long signed integer (int32) whose value falls in the
+                   range : 0x8000 0000 <= L_var_out <= 0x7fff ffff.
+
+     ----------------------------------------------------------------------------*/
+
+    __inline  int32 msu_16by16_from_int32(int32 L_var3, int16 var1, int16 var2)
+    {
+        return sub_int32(L_var3, mul_16by16_to_int32(var1, var2));
+    }
+
+
+    /*----------------------------------------------------------------------------
+
          Function Name : amr_wb_round
 
          Round the lower 16 bits of the 32 bit input number into the MS 16 bits
@@ -447,7 +400,7 @@
      ----------------------------------------------------------------------------*/
     __inline int16 amr_wb_round(int32 L_var1)
     {
-        if (L_var1 != MAX_32)
+        if (L_var1 <= (MAX_32 - 0x00008000L))
         {
             L_var1 +=  0x00008000L;
         }
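
The old guard only excluded the single value MAX_32, so any input in
(MAX_32 - 0x8000, MAX_32) still overflowed when the rounding constant was
added; the new bound covers the whole range. The fixed rounding step in
isolation:

    #include <stdint.h>

    /* Round a Q31 value into the top 16 bits: add half an LSB of the
     * upper word unless that would overflow INT32_MAX. */
    static int16_t round_high16(int32_t v)
    {
        if (v <= INT32_MAX - 0x8000) {
            v += 0x8000;            /* safe: result cannot exceed INT32_MAX */
        }                           /* else v >> 16 already yields 0x7fff  */
        return (int16_t)(v >> 16);
    }
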
diff --git a/media/libstagefright/codecs/amrwb/test/amrwbdec_test.cpp b/media/libstagefright/codecs/amrwb/test/amrwbdec_test.cpp
new file mode 100644
index 0000000..b04bafd
--- /dev/null
+++ b/media/libstagefright/codecs/amrwb/test/amrwbdec_test.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <stdlib.h>
+
+#include "pvamrwbdecoder.h"
+#include <audio_utils/sndfile.h>
+
+// Constants for AMR-WB.
+enum {
+    kInputBufferSize = 64,
+    kSamplesPerFrame = 320,
+    kBitsPerSample = 16,
+    kOutputBufferSize = kSamplesPerFrame * kBitsPerSample/8,
+    kSampleRate = 16000,
+    kChannels = 1,
+    kFileHeaderSize = 9,
+    kMaxSourceDataUnitSize = 477 * sizeof(int16_t)
+};
+
+const uint32_t kFrameSizes[] = { 17, 23, 32, 36, 40, 46, 50, 58, 60 };
+
+int main(int argc, char *argv[]) {
+
+    if (argc != 3) {
+        fprintf(stderr, "Usage %s <input file> <output file>\n", argv[0]);
+        return EXIT_FAILURE;
+    }
+
+    // Open the input file.
+    FILE* fpInput = fopen(argv[1], "rb");
+    if (fpInput == NULL) {
+        fprintf(stderr, "Could not open %s\n", argv[1]);
+        return EXIT_FAILURE;
+    }
+
+    // Validate the input AMR file.
+    char header[kFileHeaderSize];
+    int bytesRead = fread(header, 1, kFileHeaderSize, fpInput);
+    if ((bytesRead != kFileHeaderSize) ||
+        (memcmp(header, "#!AMR-WB\n", kFileHeaderSize) != 0)) {
+        fprintf(stderr, "Invalid AMR-WB file\n");
+        fclose(fpInput);
+        return EXIT_FAILURE;
+    }
+
+    // Open the output file.
+    SF_INFO sfInfo;
+    memset(&sfInfo, 0, sizeof(SF_INFO));
+    sfInfo.channels = kChannels;
+    sfInfo.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
+    sfInfo.samplerate = kSampleRate;
+    SNDFILE *handle = sf_open(argv[2], SFM_WRITE, &sfInfo);
+    if (handle == NULL) {
+        fprintf(stderr, "Could not create %s\n", argv[2]);
+        fclose(fpInput);
+        return EXIT_FAILURE;
+    }
+
+    // Allocate the decoder memory.
+    uint32_t memRequirements = pvDecoder_AmrWbMemRequirements();
+    void *decoderBuf = malloc(memRequirements);
+    assert(decoderBuf != NULL);
+
+    // Create AMR-WB decoder instance.
+    void *amrHandle;
+    int16_t *decoderCookie;
+    pvDecoder_AmrWb_Init(&amrHandle, decoderBuf, &decoderCookie);
+
+    // Allocate input buffer.
+    uint8_t *inputBuf = (uint8_t*) malloc(kInputBufferSize);
+    assert(inputBuf != NULL);
+
+    // Allocate input sample buffer.
+    int16_t *inputSampleBuf = (int16_t*) malloc(kMaxSourceDataUnitSize);
+    assert(inputSampleBuf != NULL);
+
+    // Allocate output buffer.
+    int16_t *outputBuf = (int16_t*) malloc(kOutputBufferSize);
+    assert(outputBuf != NULL);
+
+    // Decode loop.
+    int retVal = EXIT_SUCCESS;
+    while (1) {
+        // Read mode.
+        uint8_t modeByte;
+        bytesRead = fread(&modeByte, 1, 1, fpInput);
+        if (bytesRead != 1) break;
+        int16 mode = ((modeByte >> 3) & 0x0f);
+
+        // The AMR-WB file format cannot contain frame types 10 through 13.
+        if (mode >= 10 && mode <= 13) {
+            fprintf(stderr, "Encountered illegal frame type %d\n", mode);
+            retVal = EXIT_FAILURE;
+            break;
+        }
+
+        if (mode >= 9) {
+            // Produce silence for comfort noise, speech lost and no data.
+            memset(outputBuf, 0, kOutputBufferSize);
+        } else /* if (mode < 9) */ {
+            // Read rest of the frame.
+            int32_t frameSize = kFrameSizes[mode];
+            bytesRead = fread(inputBuf, 1, frameSize, fpInput);
+            if (bytesRead != frameSize) break;
+
+            int16 frameType, frameMode;
+            RX_State_wb rx_state;
+            frameMode = mode;
+            mime_unsorting(
+                    (uint8_t *)inputBuf,
+                    inputSampleBuf,
+                    &frameType, &frameMode, 1, &rx_state);
+
+            int16_t numSamplesOutput;
+            pvDecoder_AmrWb(
+                    frameMode, inputSampleBuf,
+                    outputBuf,
+                    &numSamplesOutput,
+                    decoderBuf, frameType, decoderCookie);
+
+            if (numSamplesOutput != kSamplesPerFrame) {
+                fprintf(stderr, "Decoder encountered error\n");
+                retVal = EXIT_FAILURE;
+                break;
+            }
+
+            for (int i = 0; i < kSamplesPerFrame; ++i) {
+                outputBuf[i] &= 0xfffC;
+            }
+        }
+
+        // Write output to wav.
+        sf_writef_short(handle, outputBuf, kSamplesPerFrame / kChannels);
+    }
+
+    // Close input and output file.
+    fclose(fpInput);
+    sf_close(handle);
+
+    // Free allocated memory.
+    free(inputBuf);
+    free(inputSampleBuf);
+    free(outputBuf);
+
+    return retVal;
+}
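
The mode extraction in the decoder test is the inverse of the encoder's ToC
packing: each stored AMR-WB frame starts with one header octet carrying the
frame type in bits 6..3, and kFrameSizes[] gives the payload bytes that follow
for speech modes 0..8. A sketch of that header parse, under the same
octet-layout assumption:

    #include <stdint.h>

    /* Frame type (mode) from a storage-format header octet; the payload
     * length then comes from a per-mode size table such as kFrameSizes. */
    static int toc_frame_type(uint8_t toc)
    {
        return (toc >> 3) & 0x0f;
    }
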
diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk
index 024a292..026006e 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/Android.mk
@@ -1,8 +1,5 @@
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
-include frameworks/av/media/libstagefright/codecs/common/Config.mk
-
-
 
 LOCAL_SRC_FILES := \
 	src/autocorr.c \
@@ -53,42 +50,42 @@
 	src/weight_a.c \
 	src/mem_align.c
 
+ifneq ($(ARCH_ARM_HAVE_NEON),true)
+    LOCAL_SRC_FILES_arm := \
+        src/asm/ARMV5E/convolve_opt.s \
+        src/asm/ARMV5E/cor_h_vec_opt.s \
+        src/asm/ARMV5E/Deemph_32_opt.s \
+        src/asm/ARMV5E/Dot_p_opt.s \
+        src/asm/ARMV5E/Filt_6k_7k_opt.s \
+        src/asm/ARMV5E/Norm_Corr_opt.s \
+        src/asm/ARMV5E/pred_lt4_1_opt.s \
+        src/asm/ARMV5E/residu_asm_opt.s \
+        src/asm/ARMV5E/scale_sig_opt.s \
+        src/asm/ARMV5E/Syn_filt_32_opt.s \
+        src/asm/ARMV5E/syn_filt_opt.s
 
-ifeq ($(VOTT), v5)
-LOCAL_SRC_FILES += \
-	src/asm/ARMV5E/convolve_opt.s \
-	src/asm/ARMV5E/cor_h_vec_opt.s \
-	src/asm/ARMV5E/Deemph_32_opt.s \
-	src/asm/ARMV5E/Dot_p_opt.s \
-	src/asm/ARMV5E/Filt_6k_7k_opt.s \
-	src/asm/ARMV5E/Norm_Corr_opt.s \
-	src/asm/ARMV5E/pred_lt4_1_opt.s \
-	src/asm/ARMV5E/residu_asm_opt.s \
-	src/asm/ARMV5E/scale_sig_opt.s \
-	src/asm/ARMV5E/Syn_filt_32_opt.s \
-	src/asm/ARMV5E/syn_filt_opt.s
+    LOCAL_CFLAGS_arm := -DARM -DASM_OPT
+    LOCAL_C_INCLUDES_arm = $(LOCAL_PATH)/src/asm/ARMV5E
+else
+    LOCAL_SRC_FILES_arm := \
+        src/asm/ARMV7/convolve_neon.s \
+        src/asm/ARMV7/cor_h_vec_neon.s \
+        src/asm/ARMV7/Deemph_32_neon.s \
+        src/asm/ARMV7/Dot_p_neon.s \
+        src/asm/ARMV7/Filt_6k_7k_neon.s \
+        src/asm/ARMV7/Norm_Corr_neon.s \
+        src/asm/ARMV7/pred_lt4_1_neon.s \
+        src/asm/ARMV7/residu_asm_neon.s \
+        src/asm/ARMV7/scale_sig_neon.s \
+        src/asm/ARMV7/Syn_filt_32_neon.s \
+        src/asm/ARMV7/syn_filt_neon.s
 
+    # don't actually generate neon instructions, see bug 26932980
+    LOCAL_CFLAGS_arm := -DARM -DARMV7 -DASM_OPT -mfpu=vfpv3
+    LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/src/asm/ARMV5E
+    LOCAL_C_INCLUDES_arm += $(LOCAL_PATH)/src/asm/ARMV7
 endif
 
-ifeq ($(VOTT), v7)
-LOCAL_SRC_FILES += \
-	src/asm/ARMV7/convolve_neon.s \
-	src/asm/ARMV7/cor_h_vec_neon.s \
-	src/asm/ARMV7/Deemph_32_neon.s \
-	src/asm/ARMV7/Dot_p_neon.s \
-	src/asm/ARMV7/Filt_6k_7k_neon.s \
-	src/asm/ARMV7/Norm_Corr_neon.s \
-	src/asm/ARMV7/pred_lt4_1_neon.s \
-	src/asm/ARMV7/residu_asm_neon.s \
-	src/asm/ARMV7/scale_sig_neon.s \
-	src/asm/ARMV7/Syn_filt_32_neon.s \
-	src/asm/ARMV7/syn_filt_neon.s
-
-endif
-
-# ARMV5E/Filt_6k_7k_opt.s does not compile with Clang.
-LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
-
 LOCAL_MODULE := libstagefright_amrwbenc
 
 LOCAL_ARM_MODE := arm
@@ -104,18 +101,9 @@
 	$(LOCAL_PATH)/src \
 	$(LOCAL_PATH)/inc
 
-ifeq ($(VOTT), v5)
-LOCAL_CFLAGS += -DARM -DASM_OPT
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
-endif
-
-ifeq ($(VOTT), v7)
-LOCAL_CFLAGS += -DARM -DARMV7 -DASM_OPT
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV5E
-LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV7
-endif
-
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+#LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
 
@@ -132,6 +120,8 @@
 	frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_amrwbenc
@@ -144,3 +134,6 @@
 LOCAL_MODULE_TAGS := optional
 
 include $(BUILD_SHARED_LIBRARY)
+
+################################################################################
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c b/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
index 4ff5f10..0cb0097 100644
--- a/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
+++ b/media/libstagefright/codecs/amrwbenc/SampleCode/AMRWB_E_SAMPLE.c
@@ -21,6 +21,7 @@
 
 #include      <stdio.h>
 #include      <stdlib.h>
+#include      <string.h>
 #include      <time.h>
 #include      "voAMRWB.h"
 #include      "cmnMemory.h"
@@ -222,12 +223,12 @@
 					fflush(fdst);
 				}
 			}
-			else if(returnCode == VO_ERR_LICENSE_ERROR)
+			else if((unsigned)returnCode == VO_ERR_LICENSE_ERROR)
 			{
 		        printf("Encoder time reach upper limit......");
 		        goto safe_exit;
 			}
-		} while(returnCode != VO_ERR_INPUT_BUFFER_SMALL);
+		} while((unsigned)returnCode != VO_ERR_INPUT_BUFFER_SMALL);
 
 		finish = clock();
 		duration += finish - start;
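
The casts added above address a sign-compare problem: the VO_ERR_* codes are
large hex constants that do not fit in int and therefore have unsigned type,
while returnCode is signed, so the comparison converts one operand implicitly
and trips the warning under -Werror. A small illustration of the pitfall (the
error value is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define ERR_CODE 0x82000001   /* too big for int -> unsigned constant */

    int main(void)
    {
        int32_t status = (int32_t)ERR_CODE;   /* stored as a negative value */
        /* status == ERR_CODE would convert status to unsigned implicitly
         * and warn; the explicit cast documents the intent: */
        printf("%d\n", (uint32_t)status == ERR_CODE);   /* prints 1 */
        return 0;
    }
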
diff --git a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk
index c203f77..65d69a2 100644
--- a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk
@@ -5,17 +5,20 @@
     AMRWB_E_SAMPLE.c \
     ../../common/cmnMemory.c
 
-LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_TAGS := tests
 LOCAL_MODULE := AMRWBEncTest
 
 LOCAL_ARM_MODE := arm
 
-LOCAL_CFLAGS := -DLINUX
+LOCAL_CFLAGS :=
 
 LOCAL_SHARED_LIBRARIES := \
     libstagefright \
     libdl
 
+LOCAL_STATIC_LIBRARIES := \
+    libstagefright_amrwbenc
+
 LOCAL_C_INCLUDES := \
     $(LOCAL_PATH)/ \
     $(LOCAL_PATH)/../../common \
diff --git a/media/libstagefright/codecs/amrwbenc/inc/acelp.h b/media/libstagefright/codecs/amrwbenc/inc/acelp.h
index 5a1e536..97555d5 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/acelp.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/acelp.h
@@ -18,7 +18,7 @@
 /*--------------------------------------------------------------------------*
  *                         ACELP.H                                          *
  *--------------------------------------------------------------------------*
- *       Function			 			             *
+ *       Function                                    *
  *--------------------------------------------------------------------------*/
 #ifndef __ACELP_H__
 #define __ACELP_H__
@@ -33,68 +33,68 @@
 Word16 median5(Word16 x[]);
 
 void Autocorr(
-		Word16 x[],                           /* (i)    : Input signal                      */
-		Word16 m,                             /* (i)    : LPC order                         */
-		Word16 r_h[],                         /* (o)    : Autocorrelations  (msb)           */
-		Word16 r_l[]                          /* (o)    : Autocorrelations  (lsb)           */
-	     );
+        Word16 x[],                           /* (i)    : Input signal                      */
+        Word16 m,                             /* (i)    : LPC order                         */
+        Word16 r_h[],                         /* (o)    : Autocorrelations  (msb)           */
+        Word16 r_l[]                          /* (o)    : Autocorrelations  (lsb)           */
+         );
 
 void Lag_window(
-		Word16 r_h[],                         /* (i/o)   : Autocorrelations  (msb)          */
-		Word16 r_l[]                          /* (i/o)   : Autocorrelations  (lsb)          */
-	       );
+        Word16 r_h[],                         /* (i/o)   : Autocorrelations  (msb)          */
+        Word16 r_l[]                          /* (i/o)   : Autocorrelations  (lsb)          */
+           );
 
 void Init_Levinson(
-		Word16 * mem                          /* output  :static memory (18 words) */
-		);
+        Word16 * mem                          /* output  :static memory (18 words) */
+        );
 
 void Levinson(
-		Word16 Rh[],                          /* (i)     : Rh[M+1] Vector of autocorrelations (msb) */
-		Word16 Rl[],                          /* (i)     : Rl[M+1] Vector of autocorrelations (lsb) */
-		Word16 A[],                           /* (o) Q12 : A[M]    LPC coefficients  (m = 16)       */
-		Word16 rc[],                          /* (o) Q15 : rc[M]   Reflection coefficients.         */
-		Word16 * mem                          /* (i/o)   :static memory (18 words)                  */
-	     );
+        Word16 Rh[],                          /* (i)     : Rh[M+1] Vector of autocorrelations (msb) */
+        Word16 Rl[],                          /* (i)     : Rl[M+1] Vector of autocorrelations (lsb) */
+        Word16 A[],                           /* (o) Q12 : A[M]    LPC coefficients  (m = 16)       */
+        Word16 rc[],                          /* (o) Q15 : rc[M]   Reflection coefficients.         */
+        Word16 * mem                          /* (i/o)   :static memory (18 words)                  */
+         );
 
 void Az_isp(
-		Word16 a[],                           /* (i) Q12 : predictor coefficients                 */
-		Word16 isp[],                         /* (o) Q15 : Immittance spectral pairs              */
-		Word16 old_isp[]                      /* (i)     : old isp[] (in case not found M roots)  */
-	   );
+        Word16 a[],                           /* (i) Q12 : predictor coefficients                 */
+        Word16 isp[],                         /* (o) Q15 : Immittance spectral pairs              */
+        Word16 old_isp[]                      /* (i)     : old isp[] (in case not found M roots)  */
+       );
 
 void Isp_Az(
-		Word16 isp[],                         /* (i) Q15 : Immittance spectral pairs            */
-		Word16 a[],                           /* (o) Q12 : predictor coefficients (order = M)   */
-		Word16 m,
-		Word16 adaptive_scaling               /* (i) 0   : adaptive scaling disabled */
-		/*     1   : adaptive scaling enabled  */
-	   );
+        Word16 isp[],                         /* (i) Q15 : Immittance spectral pairs            */
+        Word16 a[],                           /* (o) Q12 : predictor coefficients (order = M)   */
+        Word16 m,
+        Word16 adaptive_scaling               /* (i) 0   : adaptive scaling disabled */
+        /*     1   : adaptive scaling enabled  */
+       );
 
 void Isp_isf(
-		Word16 isp[],                         /* (i) Q15 : isp[m] (range: -1<=val<1)                */
-		Word16 isf[],                         /* (o) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
-		Word16 m                              /* (i)     : LPC order                                */
-	    );
+        Word16 isp[],                         /* (i) Q15 : isp[m] (range: -1<=val<1)                */
+        Word16 isf[],                         /* (o) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
+        Word16 m                              /* (i)     : LPC order                                */
+        );
 
 void Isf_isp(
-		Word16 isf[],                         /* (i) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
-		Word16 isp[],                         /* (o) Q15 : isp[m] (range: -1<=val<1)                */
-		Word16 m                              /* (i)     : LPC order                                */
-	    );
+        Word16 isf[],                         /* (i) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
+        Word16 isp[],                         /* (o) Q15 : isp[m] (range: -1<=val<1)                */
+        Word16 m                              /* (i)     : LPC order                                */
+        );
 
 void Int_isp(
-		Word16 isp_old[],                     /* input : isps from past frame              */
-		Word16 isp_new[],                     /* input : isps from present frame           */
-		Word16 frac[],                        /* input : fraction for 3 first subfr (Q15)  */
-		Word16 Az[]                           /* output: LP coefficients in 4 subframes    */
-	    );
+        Word16 isp_old[],                     /* input : isps from past frame              */
+        Word16 isp_new[],                     /* input : isps from present frame           */
+        Word16 frac[],                        /* input : fraction for 3 first subfr (Q15)  */
+        Word16 Az[]                           /* output: LP coefficients in 4 subframes    */
+        );
 
 void Weight_a(
-		Word16 a[],                           /* (i) Q12 : a[m+1]  LPC coefficients             */
-		Word16 ap[],                          /* (o) Q12 : Spectral expanded LPC coefficients   */
-		Word16 gamma,                         /* (i) Q15 : Spectral expansion factor.           */
-		Word16 m                              /* (i)     : LPC order.                           */
-	     );
+        Word16 a[],                           /* (i) Q12 : a[m+1]  LPC coefficients             */
+        Word16 ap[],                          /* (o) Q12 : Spectral expanded LPC coefficients   */
+        Word16 gamma,                         /* (i) Q15 : Spectral expansion factor.           */
+        Word16 m                              /* (i)     : LPC order.                           */
+         );
 
 
 /*-----------------------------------------------------------------*
@@ -102,214 +102,214 @@
  *-----------------------------------------------------------------*/
 
 void Qpisf_2s_46b(
-		Word16 * isf1,                        /* (i) Q15 : ISF in the frequency domain (0..0.5) */
-		Word16 * isf_q,                       /* (o) Q15 : quantized ISF               (0..0.5) */
-		Word16 * past_isfq,                   /* (io)Q15 : past ISF quantizer                   */
-		Word16 * indice,                      /* (o)     : quantization indices                 */
-		Word16 nb_surv                        /* (i)     : number of survivor (1, 2, 3 or 4)    */
-		);
+        Word16 * isf1,                        /* (i) Q15 : ISF in the frequency domain (0..0.5) */
+        Word16 * isf_q,                       /* (o) Q15 : quantized ISF               (0..0.5) */
+        Word16 * past_isfq,                   /* (io)Q15 : past ISF quantizer                   */
+        Word16 * indice,                      /* (o)     : quantization indices                 */
+        Word16 nb_surv                        /* (i)     : number of survivor (1, 2, 3 or 4)    */
+        );
 
 void Qpisf_2s_36b(
-		Word16 * isf1,                        /* (i) Q15 : ISF in the frequency domain (0..0.5) */
-		Word16 * isf_q,                       /* (o) Q15 : quantized ISF               (0..0.5) */
-		Word16 * past_isfq,                   /* (io)Q15 : past ISF quantizer                   */
-		Word16 * indice,                      /* (o)     : quantization indices                 */
-		Word16 nb_surv                        /* (i)     : number of survivor (1, 2, 3 or 4)    */
-		);
+        Word16 * isf1,                        /* (i) Q15 : ISF in the frequency domain (0..0.5) */
+        Word16 * isf_q,                       /* (o) Q15 : quantized ISF               (0..0.5) */
+        Word16 * past_isfq,                   /* (io)Q15 : past ISF quantizer                   */
+        Word16 * indice,                      /* (o)     : quantization indices                 */
+        Word16 nb_surv                        /* (i)     : number of survivor (1, 2, 3 or 4)    */
+        );
 
 void Dpisf_2s_46b(
-		Word16 * indice,                      /* input:  quantization indices                       */
-		Word16 * isf_q,                       /* output: quantized ISF in frequency domain (0..0.5) */
-		Word16 * past_isfq,                   /* i/0   : past ISF quantizer                    */
-		Word16 * isfold,                      /* input : past quantized ISF                    */
-		Word16 * isf_buf,                     /* input : isf buffer                                                        */
-		Word16 bfi,                           /* input : Bad frame indicator                   */
-		Word16 enc_dec
-		);
+        Word16 * indice,                      /* input:  quantization indices                       */
+        Word16 * isf_q,                       /* output: quantized ISF in frequency domain (0..0.5) */
+        Word16 * past_isfq,                   /* i/o   : past ISF quantizer                    */
+        Word16 * isfold,                      /* input : past quantized ISF                    */
+        Word16 * isf_buf,                     /* input : isf buffer                            */
+        Word16 bfi,                           /* input : Bad frame indicator                   */
+        Word16 enc_dec
+        );
 
 void Dpisf_2s_36b(
-		Word16 * indice,                      /* input:  quantization indices                       */
-		Word16 * isf_q,                       /* output: quantized ISF in frequency domain (0..0.5) */
-		Word16 * past_isfq,                   /* i/0   : past ISF quantizer                    */
-		Word16 * isfold,                      /* input : past quantized ISF                    */
-		Word16 * isf_buf,                     /* input : isf buffer                                                        */
-		Word16 bfi,                           /* input : Bad frame indicator                   */
-		Word16 enc_dec
-		);
+        Word16 * indice,                      /* input:  quantization indices                       */
+        Word16 * isf_q,                       /* output: quantized ISF in frequency domain (0..0.5) */
+        Word16 * past_isfq,                   /* i/o   : past ISF quantizer                    */
+        Word16 * isfold,                      /* input : past quantized ISF                    */
+        Word16 * isf_buf,                     /* input : isf buffer                            */
+        Word16 bfi,                           /* input : Bad frame indicator                   */
+        Word16 enc_dec
+        );
 
 void Qisf_ns(
-		Word16 * isf1,                        /* input : ISF in the frequency domain (0..0.5) */
-		Word16 * isf_q,                       /* output: quantized ISF                        */
-		Word16 * indice                       /* output: quantization indices                 */
-	    );
+        Word16 * isf1,                        /* input : ISF in the frequency domain (0..0.5) */
+        Word16 * isf_q,                       /* output: quantized ISF                        */
+        Word16 * indice                       /* output: quantization indices                 */
+        );
 
 void Disf_ns(
-		Word16 * indice,                      /* input:  quantization indices                  */
-		Word16 * isf_q                        /* input : ISF in the frequency domain (0..0.5)  */
-	    );
+        Word16 * indice,                      /* input:  quantization indices                  */
+        Word16 * isf_q                        /* input : ISF in the frequency domain (0..0.5)  */
+        );
 
 Word16 Sub_VQ(                             /* output: return quantization index     */
-		Word16 * x,                           /* input : ISF residual vector           */
-		Word16 * dico,                        /* input : quantization codebook         */
-		Word16 dim,                           /* input : dimention of vector           */
-		Word16 dico_size,                     /* input : size of quantization codebook */
-		Word32 * distance                     /* output: error of quantization         */
-	     );
+        Word16 * x,                           /* input : ISF residual vector           */
+        Word16 * dico,                        /* input : quantization codebook         */
+        Word16 dim,                           /* input : dimension of vector           */
+        Word16 dico_size,                     /* input : size of quantization codebook */
+        Word32 * distance                     /* output: error of quantization         */
+         );
 
 void Reorder_isf(
-		Word16 * isf,                         /* (i/o) Q15: ISF in the frequency domain (0..0.5) */
-		Word16 min_dist,                      /* (i) Q15  : minimum distance to keep             */
-		Word16 n                              /* (i)      : number of ISF                        */
-		);
+        Word16 * isf,                         /* (i/o) Q15: ISF in the frequency domain (0..0.5) */
+        Word16 min_dist,                      /* (i) Q15  : minimum distance to keep             */
+        Word16 n                              /* (i)      : number of ISF                        */
+        );
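
The Q15 tags in the comments above denote the fixed-point convention used
throughout these headers: a Word16 q stands for the real value q / 2^15, so
the ISF range 0..0.5 maps to 0..16384. A minimal sketch of the convention
(the helper names are illustrative, not part of the codec):

    #include <stdint.h>

    /* Q15 convention assumed by the prototypes above: q represents q / 32768.0. */
    static int16_t q15_from_double(double v) { return (int16_t)(v * 32768.0); }
    static double  q15_to_double(int16_t q)  { return q / 32768.0; }

For example, a min_dist of 0.005 in normalized frequency would be passed to
Reorder_isf() as q15_from_double(0.005) == 163.
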
 
 /*-----------------------------------------------------------------*
  *                       filter prototypes                         *
  *-----------------------------------------------------------------*/
 
 void Init_Decim_12k8(
-		Word16 mem[]                          /* output: memory (2*NB_COEF_DOWN) set to zeros */
-		);
+        Word16 mem[]                          /* output: memory (2*NB_COEF_DOWN) set to zeros */
+        );
 void Decim_12k8(
-		Word16 sig16k[],                      /* input:  signal to downsampling  */
-		Word16 lg,                            /* input:  length of input         */
-		Word16 sig12k8[],                     /* output: decimated signal        */
-		Word16 mem[]                          /* in/out: memory (2*NB_COEF_DOWN) */
-	       );
+        Word16 sig16k[],                      /* input:  signal to downsample    */
+        Word16 lg,                            /* input:  length of input         */
+        Word16 sig12k8[],                     /* output: decimated signal        */
+        Word16 mem[]                          /* in/out: memory (2*NB_COEF_DOWN) */
+           );
 
 void Init_HP50_12k8(Word16 mem[]);
 void HP50_12k8(
-		Word16 signal[],                      /* input/output signal */
-		Word16 lg,                            /* lenght of signal    */
-		Word16 mem[]                          /* filter memory [6]   */
-	      );
+        Word16 signal[],                      /* input/output signal */
+        Word16 lg,                            /* length of signal    */
+        Word16 mem[]                          /* filter memory [6]   */
+          );
 void Init_HP400_12k8(Word16 mem[]);
 void HP400_12k8(
-		Word16 signal[],                      /* input/output signal */
-		Word16 lg,                            /* lenght of signal    */
-		Word16 mem[]                          /* filter memory [6]   */
-	       );
+        Word16 signal[],                      /* input/output signal */
+        Word16 lg,                            /* length of signal    */
+        Word16 mem[]                          /* filter memory [6]   */
+           );
 
 void Init_Filt_6k_7k(Word16 mem[]);
 void Filt_6k_7k(
-		Word16 signal[],                      /* input:  signal                  */
-		Word16 lg,                            /* input:  length of input         */
-		Word16 mem[]                          /* in/out: memory (size=30)        */
-	       );
+        Word16 signal[],                      /* input:  signal                  */
+        Word16 lg,                            /* input:  length of input         */
+        Word16 mem[]                          /* in/out: memory (size=30)        */
+           );
 void Filt_6k_7k_asm(
-		Word16 signal[],                      /* input:  signal                  */
-		Word16 lg,                            /* input:  length of input         */
-		Word16 mem[]                          /* in/out: memory (size=30)        */
-	       );
+        Word16 signal[],                      /* input:  signal                  */
+        Word16 lg,                            /* input:  length of input         */
+        Word16 mem[]                          /* in/out: memory (size=30)        */
+           );
 
 void LP_Decim2(
-		Word16 x[],                           /* in/out: signal to process         */
-		Word16 l,                             /* input : size of filtering         */
-		Word16 mem[]                          /* in/out: memory (size=3)           */
-	      );
+        Word16 x[],                           /* in/out: signal to process         */
+        Word16 l,                             /* input : size of filtering         */
+        Word16 mem[]                          /* in/out: memory (size=3)           */
+          );
 
 void Preemph(
-		Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
-		Word16 mu,                            /* (i) Q15 : preemphasis coefficient                */
-		Word16 lg,                            /* (i)     : lenght of filtering                    */
-		Word16 * mem                          /* (i/o)   : memory (x[-1])                         */
-	    );
+        Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
+        Word16 mu,                            /* (i) Q15 : preemphasis coefficient                */
+        Word16 lg,                            /* (i)     : length of filtering                    */
+        Word16 * mem                          /* (i/o)   : memory (x[-1])                         */
+        );
 void Preemph2(
-		Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
-		Word16 mu,                            /* (i) Q15 : preemphasis coefficient                */
-		Word16 lg,                            /* (i)     : lenght of filtering                    */
-		Word16 * mem                          /* (i/o)   : memory (x[-1])                         */
-	     );
+        Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
+        Word16 mu,                            /* (i) Q15 : preemphasis coefficient                */
+        Word16 lg,                            /* (i)     : length of filtering                    */
+        Word16 * mem                          /* (i/o)   : memory (x[-1])                         */
+         );
 void Deemph(
-		Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
-		Word16 mu,                            /* (i) Q15 : deemphasis factor                      */
-		Word16 L,                             /* (i)     : vector size                            */
-		Word16 * mem                          /* (i/o)   : memory (y[-1])                         */
-	   );
+        Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
+        Word16 mu,                            /* (i) Q15 : deemphasis factor                      */
+        Word16 L,                             /* (i)     : vector size                            */
+        Word16 * mem                          /* (i/o)   : memory (y[-1])                         */
+       );
 void Deemph2(
-		Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
-		Word16 mu,                            /* (i) Q15 : deemphasis factor                      */
-		Word16 L,                             /* (i)     : vector size                            */
-		Word16 * mem                          /* (i/o)   : memory (y[-1])                         */
-	    );
+        Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
+        Word16 mu,                            /* (i) Q15 : deemphasis factor                      */
+        Word16 L,                             /* (i)     : vector size                            */
+        Word16 * mem                          /* (i/o)   : memory (y[-1])                         */
+        );
 void Deemph_32(
-		Word16 x_hi[],                        /* (i)     : input signal (bit31..16) */
-		Word16 x_lo[],                        /* (i)     : input signal (bit15..4)  */
-		Word16 y[],                           /* (o)     : output signal (x16)      */
-		Word16 mu,                            /* (i) Q15 : deemphasis factor        */
-		Word16 L,                             /* (i)     : vector size              */
-		Word16 * mem                          /* (i/o)   : memory (y[-1])           */
-	      );
+        Word16 x_hi[],                        /* (i)     : input signal (bit31..16) */
+        Word16 x_lo[],                        /* (i)     : input signal (bit15..4)  */
+        Word16 y[],                           /* (o)     : output signal (x16)      */
+        Word16 mu,                            /* (i) Q15 : deemphasis factor        */
+        Word16 L,                             /* (i)     : vector size              */
+        Word16 * mem                          /* (i/o)   : memory (y[-1])           */
+          );
 
 void Deemph_32_asm(
-		Word16 x_hi[],                        /* (i)     : input signal (bit31..16) */
-		Word16 x_lo[],                        /* (i)     : input signal (bit15..4)  */
-		Word16 y[],                           /* (o)     : output signal (x16)      */
-		Word16 * mem                          /* (i/o)   : memory (y[-1])           */
-	      );
+        Word16 x_hi[],                        /* (i)     : input signal (bit31..16) */
+        Word16 x_lo[],                        /* (i)     : input signal (bit15..4)  */
+        Word16 y[],                           /* (o)     : output signal (x16)      */
+        Word16 * mem                          /* (i/o)   : memory (y[-1])           */
+          );
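
Per the comments on Deemph_32 above, the 32-bit signal is carried as two
Word16 arrays: x_hi holds bits 31..16 and x_lo holds bits 15..4 of each
sample. A sketch of how one sample would be reassembled under that reading
(illustrative only; the codec operates on the two halves directly):

    #include <stdint.h>

    /* Recombine the hi/lo split described above: hi -> bits 31..16,
     * lo -> bits 15..4 (a 64-bit intermediate keeps the arithmetic defined). */
    static int32_t combine_hi_lo(int16_t hi, int16_t lo) {
        return (int32_t)((int64_t)hi * 65536 + (int64_t)lo * 16);
    }
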
 
 void Convolve(
-		Word16 x[],                           /* (i)     : input vector                              */
-		Word16 h[],                           /* (i) Q15    : impulse response                       */
-		Word16 y[],                           /* (o) 12 bits: output vector                          */
-		Word16 L                              /* (i)     : vector size                               */
-	     );
+        Word16 x[],                           /* (i)     : input vector                              */
+        Word16 h[],                           /* (i) Q15    : impulse response                       */
+        Word16 y[],                           /* (o) 12 bits: output vector                          */
+        Word16 L                              /* (i)     : vector size                               */
+         );
 
 void Convolve_asm(
-		Word16 x[],                           /* (i)     : input vector                              */
-		Word16 h[],                           /* (i) Q15    : impulse response                       */
-		Word16 y[],                           /* (o) 12 bits: output vector                          */
-		Word16 L                              /* (i)     : vector size                               */
-	     );
+        Word16 x[],                           /* (i)     : input vector                              */
+        Word16 h[],                           /* (i) Q15    : impulse response                       */
+        Word16 y[],                           /* (o) 12 bits: output vector                          */
+        Word16 L                              /* (i)     : vector size                               */
+         );
 
 void Residu(
-		Word16 a[],                           /* (i) Q12 : prediction coefficients                     */
-		Word16 x[],                           /* (i)     : speech (values x[-m..-1] are needed         */
-		Word16 y[],                           /* (o)     : residual signal                             */
-		Word16 lg                             /* (i)     : size of filtering                           */
-		);
+        Word16 a[],                           /* (i) Q12 : prediction coefficients                     */
+        Word16 x[],                           /* (i)     : speech (values x[-m..-1] are needed)        */
+        Word16 y[],                           /* (o)     : residual signal                             */
+        Word16 lg                             /* (i)     : size of filtering                           */
+        );
 
 void Residu_opt(
-		Word16 a[],                           /* (i) Q12 : prediction coefficients                     */
-		Word16 x[],                           /* (i)     : speech (values x[-m..-1] are needed         */
-		Word16 y[],                           /* (o)     : residual signal                             */
-		Word16 lg                             /* (i)     : size of filtering                           */
-		);
+        Word16 a[],                           /* (i) Q12 : prediction coefficients                     */
+        Word16 x[],                           /* (i)     : speech (values x[-m..-1] are needed)        */
+        Word16 y[],                           /* (o)     : residual signal                             */
+        Word16 lg                             /* (i)     : size of filtering                           */
+        );
 
 void Syn_filt(
-	Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients           */
-	Word16 x[],                           /* (i)     : input signal                             */
-	Word16 y[],                           /* (o)     : output signal                            */
-	Word16 lg,                            /* (i)     : size of filtering                        */
-	Word16 mem[],                         /* (i/o)   : memory associated with this filtering.   */
-	Word16 update                         /* (i)     : 0=no update, 1=update of memory.         */
-	);
+    Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients           */
+    Word16 x[],                           /* (i)     : input signal                             */
+    Word16 y[],                           /* (o)     : output signal                            */
+    Word16 lg,                            /* (i)     : size of filtering                        */
+    Word16 mem[],                         /* (i/o)   : memory associated with this filtering.   */
+    Word16 update                         /* (i)     : 0=no update, 1=update of memory.         */
+    );
 
 void Syn_filt_asm(
-	Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients           */
-	Word16 x[],                           /* (i)     : input signal                             */
-	Word16 y[],                           /* (o)     : output signal                            */
-	Word16 mem[]                          /* (i/o)   : memory associated with this filtering.   */
-	);
+    Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients           */
+    Word16 x[],                           /* (i)     : input signal                             */
+    Word16 y[],                           /* (o)     : output signal                            */
+    Word16 mem[]                          /* (i/o)   : memory associated with this filtering.   */
+    );
 
 void Syn_filt_32(
-	Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients */
-	Word16 m,                             /* (i)     : order of LP filter             */
-	Word16 exc[],                         /* (i) Qnew: excitation (exc[i] >> Qnew)    */
-	Word16 Qnew,                          /* (i)     : exc scaling = 0(min) to 8(max) */
-	Word16 sig_hi[],                      /* (o) /16 : synthesis high                 */
-	Word16 sig_lo[],                      /* (o) /16 : synthesis low                  */
-	Word16 lg                             /* (i)     : size of filtering              */
-	);
+    Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients */
+    Word16 m,                             /* (i)     : order of LP filter             */
+    Word16 exc[],                         /* (i) Qnew: excitation (exc[i] >> Qnew)    */
+    Word16 Qnew,                          /* (i)     : exc scaling = 0(min) to 8(max) */
+    Word16 sig_hi[],                      /* (o) /16 : synthesis high                 */
+    Word16 sig_lo[],                      /* (o) /16 : synthesis low                  */
+    Word16 lg                             /* (i)     : size of filtering              */
+    );
 
 void Syn_filt_32_asm(
-	Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients */
-	Word16 m,                             /* (i)     : order of LP filter             */
-	Word16 exc[],                         /* (i) Qnew: excitation (exc[i] >> Qnew)    */
-	Word16 Qnew,                          /* (i)     : exc scaling = 0(min) to 8(max) */
-	Word16 sig_hi[],                      /* (o) /16 : synthesis high                 */
-	Word16 sig_lo[],                      /* (o) /16 : synthesis low                  */
-	Word16 lg                             /* (i)     : size of filtering              */
-	);
+    Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients */
+    Word16 m,                             /* (i)     : order of LP filter             */
+    Word16 exc[],                         /* (i) Qnew: excitation (exc[i] >> Qnew)    */
+    Word16 Qnew,                          /* (i)     : exc scaling = 0(min) to 8(max) */
+    Word16 sig_hi[],                      /* (o) /16 : synthesis high                 */
+    Word16 sig_lo[],                      /* (o) /16 : synthesis low                  */
+    Word16 lg                             /* (i)     : size of filtering              */
+    );
 /*-----------------------------------------------------------------*
  *                       pitch prototypes                          *
  *-----------------------------------------------------------------*/
@@ -443,12 +443,12 @@
      Word16 nbbits,                        /* (i) : 20, 36, 44, 52, 64, 72 or 88 bits                */
      Word16 ser_size,                      /* (i) : bit rate                                         */
      Word16 _index[]                       /* (o) : index (20): 5+5+5+5 = 20 bits.                   */
-					   /* (o) : index (36): 9+9+9+9 = 36 bits.                   */
-					   /* (o) : index (44): 13+9+13+9 = 44 bits.                 */
-					   /* (o) : index (52): 13+13+13+13 = 52 bits.               */
-					   /* (o) : index (64): 2+2+2+2+14+14+14+14 = 64 bits.       */
-					   /* (o) : index (72): 10+2+10+2+10+14+10+14 = 72 bits.     */
-					   /* (o) : index (88): 11+11+11+11+11+11+11+11 = 88 bits.   */
+                       /* (o) : index (36): 9+9+9+9 = 36 bits.                   */
+                       /* (o) : index (44): 13+9+13+9 = 44 bits.                 */
+                       /* (o) : index (52): 13+13+13+13 = 52 bits.               */
+                       /* (o) : index (64): 2+2+2+2+14+14+14+14 = 64 bits.       */
+                       /* (o) : index (72): 10+2+10+2+10+14+10+14 = 72 bits.     */
+                       /* (o) : index (88): 11+11+11+11+11+11+11+11 = 88 bits.   */
 );
 
 void Pit_shrp(
diff --git a/media/libstagefright/codecs/amrwbenc/inc/basic_op.h b/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
index f42a27c..8165f69 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
@@ -25,8 +25,8 @@
 #define MAX_32 (Word32)0x7fffffffL
 #define MIN_32 (Word32)0x80000000L
 
-#define MAX_16 (Word16)+32767	/* 0x7fff */
-#define MIN_16 (Word16)-32768	/* 0x8000 */
+#define MAX_16 (Word16)+32767   /* 0x7fff */
+#define MIN_16 (Word16)-32768   /* 0x8000 */
 
 
 #define  static_vo  static __inline
@@ -41,22 +41,22 @@
 #define L_negate(L_var1) (((L_var1) == (MIN_32)) ? (MAX_32) : (-(L_var1)))                 /* Long negate,     2*/
 
 
-#define extract_h(a)			((Word16)(a >> 16))
-#define extract_l(x)            	(Word16)((x))
-#define add1(a,b)			(a + b)
-#define vo_L_msu(a,b,c)			( a - (( b * c ) << 1) )
+#define extract_h(a)            ((Word16)(a >> 16))
+#define extract_l(x)                (Word16)((x))
+#define add1(a,b)           (a + b)
+#define vo_L_msu(a,b,c)         ( a - (( b * c ) << 1) )
 #define vo_mult32(a, b)         ((a) * (b))
-#define vo_mult(a,b)			(( a * b ) >> 15 )
-#define	vo_L_mult(a,b)	    		(((a) * (b)) << 1)
-#define vo_shr_r(var1, var2)   		((var1+((Word16)(1L<<(var2-1))))>>var2)
-#define vo_sub(a,b)			(a - b)
-#define vo_L_deposit_h(a)		((Word32)((a) << 16))
-#define vo_round(a)			((a + 0x00008000) >> 16)
-#define vo_extract_l(a)			((Word16)(a))
-#define vo_L_add(a,b)			(a + b)
-#define vo_L_sub(a,b)			(a - b)
-#define vo_mult_r(a,b)			((( a * b ) + 0x4000 ) >> 15 )
-#define vo_negate(a)		        (-a)
+#define vo_mult(a,b)            (( a * b ) >> 15 )
+#define vo_L_mult(a,b)              (((a) * (b)) << 1)
+#define vo_shr_r(var1, var2)        ((var1+((Word16)(1L<<(var2-1))))>>var2)
+#define vo_sub(a,b)         (a - b)
+#define vo_L_deposit_h(a)       ((Word32)((a) << 16))
+#define vo_round(a)         ((((a) >> 15) + 1) >> 1)
+#define vo_extract_l(a)         ((Word16)(a))
+#define vo_L_add(a,b)           (a + b)
+#define vo_L_sub(a,b)           (a - b)
+#define vo_mult_r(a,b)          (((( a * b ) >> 14) + 1 ) >> 1 )
+#define vo_negate(a)                (-a)
 #define vo_L_shr_r(L_var1, var2)        ((L_var1+((Word32)(1L<<(var2-1))))>>var2)
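
Two of the macros above change behaviour, not just whitespace: vo_round and
vo_mult_r now shift before rounding instead of adding a rounding constant
first, so the intermediate no longer signed-overflows when the operand lies
within 0x8000 (resp. 0x4000) of MAX_32. A standalone comparison sketch
(function names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Old and new vo_round, written as functions for comparison. */
    static int16_t round_old(int32_t a) { return (int16_t)((a + 0x00008000) >> 16); }
    static int16_t round_new(int32_t a) { return (int16_t)((((a) >> 15) + 1) >> 1); }

    int main(void) {
        int32_t a;
        /* The two forms agree wherever the old one is well defined... */
        for (a = -1000000; a < 1000000; a += 13)
            assert(round_old(a) == round_new(a));
        /* ...but round_old(0x7fffc000) would overflow the signed addition,
         * while every intermediate of round_new stays in range. */
        (void)round_new(0x7fffc000);
        return 0;
    }
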
 
 
@@ -65,25 +65,25 @@
 |   Prototypes for basic arithmetic operators                               |
 |___________________________________________________________________________|
 */
-static_vo Word16 add (Word16 var1, Word16 var2);				/* Short add,1 */
-static_vo Word16 sub (Word16 var1, Word16 var2);				/* Short sub,1 */
+static_vo Word16 add (Word16 var1, Word16 var2);                /* Short add,1 */
+static_vo Word16 sub (Word16 var1, Word16 var2);                /* Short sub,1 */
 static_vo Word16 shl (Word16 var1, Word16 var2);                                /* Short shift left,    1   */
 static_vo Word16 shr (Word16 var1, Word16 var2);                                /* Short shift right,   1   */
 static_vo Word16 mult (Word16 var1, Word16 var2);                               /* Short mult,          1   */
 static_vo Word32 L_mult (Word16 var1, Word16 var2);                             /* Long mult,           1   */
 static_vo Word16 voround (Word32 L_var1);                                       /* Round,               1   */
-static_vo Word32 L_mac (Word32 L_var3, Word16 var1, Word16 var2);            	/* Mac,  1  */
-static_vo Word32 L_msu (Word32 L_var3, Word16 var1, Word16 var2);   		/* Msu,  1  */
-static_vo Word32 L_add (Word32 L_var1, Word32 L_var2);   		 	/* Long add,        2 */
-static_vo Word32 L_sub (Word32 L_var1, Word32 L_var2);   			/* Long sub,        2 */
-static_vo Word16 mult_r (Word16 var1, Word16 var2);      		 	/* Mult with round, 2 */
-static_vo Word32 L_shl2(Word32 L_var1, Word16 var2);             		/* var2 > 0*/
-static_vo Word32 L_shl (Word32 L_var1, Word16 var2);    	 	 	/* Long shift left, 2 */
-static_vo Word32 L_shr (Word32 L_var1, Word16 var2);    	 	 	/* Long shift right, 2*/
-static_vo Word32 L_shr_r (Word32 L_var1, Word16 var2); 				/* Long shift right with round,  3   */
-static_vo Word16 norm_s (Word16 var1);             				/* Short norm,           15  */
-static_vo Word16 div_s (Word16 var1, Word16 var2); 				/* Short division,       18  */
-static_vo Word16 norm_l (Word32 L_var1);           				/* Long norm,            30  */
+static_vo Word32 L_mac (Word32 L_var3, Word16 var1, Word16 var2);               /* Mac,  1  */
+static_vo Word32 L_msu (Word32 L_var3, Word16 var1, Word16 var2);           /* Msu,  1  */
+static_vo Word32 L_add (Word32 L_var1, Word32 L_var2);              /* Long add,        2 */
+static_vo Word32 L_sub (Word32 L_var1, Word32 L_var2);              /* Long sub,        2 */
+static_vo Word16 mult_r (Word16 var1, Word16 var2);                 /* Mult with round, 2 */
+static_vo Word32 L_shl2(Word32 L_var1, Word16 var2);                    /* var2 > 0*/
+static_vo Word32 L_shl (Word32 L_var1, Word16 var2);                /* Long shift left, 2 */
+static_vo Word32 L_shr (Word32 L_var1, Word16 var2);                /* Long shift right, 2*/
+static_vo Word32 L_shr_r (Word32 L_var1, Word16 var2);              /* Long shift right with round,  3   */
+static_vo Word16 norm_s (Word16 var1);                          /* Short norm,           15  */
+static_vo Word16 div_s (Word16 var1, Word16 var2);              /* Short division,       18  */
+static_vo Word16 norm_l (Word32 L_var1);                        /* Long norm,            30  */
 
 /*___________________________________________________________________________
 |                                                                           |
@@ -125,11 +125,11 @@
 */
 static_vo Word16 add (Word16 var1, Word16 var2)
 {
-	Word16 var_out;
-	Word32 L_sum;
-	L_sum = (Word32) var1 + var2;
-	var_out = saturate (L_sum);
-	return (var_out);
+    Word16 var_out;
+    Word32 L_sum;
+    L_sum = (Word32) var1 + var2;
+    var_out = saturate (L_sum);
+    return (var_out);
 }
 
 /*___________________________________________________________________________
@@ -168,11 +168,11 @@
 
 static_vo Word16 sub (Word16 var1, Word16 var2)
 {
-	Word16 var_out;
-	Word32 L_diff;
-	L_diff = (Word32) var1 - var2;
-	var_out = saturate (L_diff);
-	return (var_out);
+    Word16 var_out;
+    Word32 L_diff;
+    L_diff = (Word32) var1 - var2;
+    var_out = saturate (L_diff);
+    return (var_out);
 }
 
 /*___________________________________________________________________________
@@ -212,27 +212,31 @@
 
 static_vo Word16 shl (Word16 var1, Word16 var2)
 {
-	Word16 var_out;
-	Word32 result;
-	if (var2 < 0)
-	{
-		if (var2 < -16)
-			var2 = -16;
-		var_out = var1 >> ((Word16)-var2);
-	}
-	else
-	{
-		result = (Word32) var1 *((Word32) 1 << var2);
-		if ((var2 > 15 && var1 != 0) || (result != (Word32) ((Word16) result)))
-		{
-			var_out = (Word16)((var1 > 0) ? MAX_16 : MIN_16);
-		}
-		else
-		{
-			var_out = extract_l (result);
-		}
-	}
-	return (var_out);
+    Word16 var_out;
+    Word32 result;
+    if (var2 < 0)
+    {
+        if (var2 < -16)
+            var2 = -16;
+        var_out = var1 >> ((Word16)-var2);
+    }
+    else
+    {
+        if (var2 > 15 && var1 != 0)
+        {
+            var_out = (Word16)((var1 > 0) ? MAX_16 : MIN_16);
+        }
+        else
+        {
+            result = (Word32) var1 *((Word32) 1 << var2);
+            if ((result != (Word32) ((Word16) result))) {
+                var_out = (Word16)((var1 > 0) ? MAX_16 : MIN_16);
+            } else {
+                var_out = extract_l (result);
+            }
+        }
+    }
+    return (var_out);
 }
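
This hunk is more than re-indentation: the var2 > 15 saturation test is
hoisted ahead of the multiply, so a nonzero var1 now saturates before
(Word32)1 << var2 is ever evaluated with an oversized shift count. A
self-contained sketch of the resulting behaviour, mapping Word16/Word32 onto
int16_t/int32_t:

    #include <assert.h>
    #include <stdint.h>

    /* Mirror of the reordered shl(): saturating left shift of a 16-bit value. */
    static int16_t shl_sketch(int16_t var1, int16_t var2) {
        if (var2 < 0) {
            if (var2 < -16) var2 = -16;
            return (int16_t)(var1 >> (-var2));
        }
        if (var2 > 15 && var1 != 0)                   /* saturate before shifting */
            return (var1 > 0) ? INT16_MAX : INT16_MIN;
        int32_t result = (int32_t)var1 * ((int32_t)1 << var2);
        if (result != (int32_t)(int16_t)result)       /* lost high bits: saturate */
            return (var1 > 0) ? INT16_MAX : INT16_MIN;
        return (int16_t)result;
    }

    int main(void) {
        assert(shl_sketch(1, 20) == INT16_MAX);       /* old code evaluated 1 << 20 first */
        assert(shl_sketch(-1, 20) == INT16_MIN);
        assert(shl_sketch(0x4000, 1) == INT16_MAX);   /* 0x8000 does not fit in Word16 */
        assert(shl_sketch(0x1230, -4) == 0x0123);
        return 0;
    }
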
 
 /*___________________________________________________________________________
@@ -272,32 +276,32 @@
 
 static_vo Word16 shr (Word16 var1, Word16 var2)
 {
-	Word16 var_out;
-	if (var2 < 0)
-	{
-		if (var2 < -16)
-			var2 = -16;
-		var_out = shl(var1, (Word16)-var2);
-	}
-	else
-	{
-		if (var2 >= 15)
-		{
-			var_out = (Word16)((var1 < 0) ? -1 : 0);
-		}
-		else
-		{
-			if (var1 < 0)
-			{
-				var_out = (Word16)(~((~var1) >> var2));
-			}
-			else
-			{
-				var_out = (Word16)(var1 >> var2);
-			}
-		}
-	}
-	return (var_out);
+    Word16 var_out;
+    if (var2 < 0)
+    {
+        if (var2 < -16)
+            var2 = -16;
+        var_out = shl(var1, (Word16)-var2);
+    }
+    else
+    {
+        if (var2 >= 15)
+        {
+            var_out = (Word16)((var1 < 0) ? -1 : 0);
+        }
+        else
+        {
+            if (var1 < 0)
+            {
+                var_out = (Word16)(~((~var1) >> var2));
+            }
+            else
+            {
+                var_out = (Word16)(var1 >> var2);
+            }
+        }
+    }
+    return (var_out);
 }
 
 /*___________________________________________________________________________
@@ -337,14 +341,14 @@
 
 static_vo Word16 mult (Word16 var1, Word16 var2)
 {
-	Word16 var_out;
-	Word32 L_product;
-	L_product = (Word32) var1 *(Word32) var2;
-	L_product = (L_product & (Word32) 0xffff8000L) >> 15;
-	if (L_product & (Word32) 0x00010000L)
-		L_product = L_product | (Word32) 0xffff0000L;
-	var_out = saturate (L_product);
-	return (var_out);
+    Word16 var_out;
+    Word32 L_product;
+    L_product = (Word32) var1 *(Word32) var2;
+    L_product = (L_product & (Word32) 0xffff8000L) >> 15;
+    if (L_product & (Word32) 0x00010000L)
+        L_product = L_product | (Word32) 0xffff0000L;
+    var_out = saturate (L_product);
+    return (var_out);
 }
 
 /*___________________________________________________________________________
@@ -384,17 +388,17 @@
 
 static_vo Word32 L_mult (Word16 var1, Word16 var2)
 {
-	Word32 L_var_out;
-	L_var_out = (Word32) var1 *(Word32) var2;
-	if (L_var_out != (Word32) 0x40000000L)
-	{
-		L_var_out *= 2;
-	}
-	else
-	{
-		L_var_out = MAX_32;
-	}
-	return (L_var_out);
+    Word32 L_var_out;
+    L_var_out = (Word32) var1 *(Word32) var2;
+    if (L_var_out != (Word32) 0x40000000L)
+    {
+        L_var_out *= 2;
+    }
+    else
+    {
+        L_var_out = MAX_32;
+    }
+    return (L_var_out);
 }
 
 /*___________________________________________________________________________
@@ -430,11 +434,11 @@
 
 static_vo Word16 voround (Word32 L_var1)
 {
-	Word16 var_out;
-	Word32 L_rounded;
-	L_rounded = L_add (L_var1, (Word32) 0x00008000L);
-	var_out = extract_h (L_rounded);
-	return (var_out);
+    Word16 var_out;
+    Word32 L_rounded;
+    L_rounded = L_add (L_var1, (Word32) 0x00008000L);
+    var_out = extract_h (L_rounded);
+    return (var_out);
 }
 
 /*___________________________________________________________________________
@@ -476,11 +480,11 @@
 
 static_vo Word32 L_mac (Word32 L_var3, Word16 var1, Word16 var2)
 {
-	Word32 L_var_out;
-	Word32 L_product;
-	L_product = ((var1 * var2) << 1);
-	L_var_out = L_add (L_var3, L_product);
-	return (L_var_out);
+    Word32 L_var_out;
+    Word32 L_product;
+    L_product = ((var1 * var2) << 1);
+    L_var_out = L_add (L_var3, L_product);
+    return (L_var_out);
 }
 
 /*___________________________________________________________________________
@@ -522,11 +526,11 @@
 
 static_vo Word32 L_msu (Word32 L_var3, Word16 var1, Word16 var2)
 {
-	Word32 L_var_out;
-	Word32 L_product;
-	L_product = (var1 * var2)<<1;
-	L_var_out = L_sub (L_var3, L_product);
-	return (L_var_out);
+    Word32 L_var_out;
+    Word32 L_product;
+    L_product = (var1 * var2)<<1;
+    L_var_out = L_sub (L_var3, L_product);
+    return (L_var_out);
 }
 
 /*___________________________________________________________________________
@@ -561,18 +565,19 @@
 |___________________________________________________________________________|
 */
 
+__attribute__((no_sanitize("integer")))
 static_vo Word32 L_add (Word32 L_var1, Word32 L_var2)
 {
-	Word32 L_var_out;
-	L_var_out = L_var1 + L_var2;
-	if (((L_var1 ^ L_var2) & MIN_32) == 0)
-	{
-		if ((L_var_out ^ L_var1) & MIN_32)
-		{
-			L_var_out = (L_var1 < 0) ? MIN_32 : MAX_32;
-		}
-	}
-	return (L_var_out);
+    Word32 L_var_out;
+    L_var_out = L_var1 + L_var2;
+    if (((L_var1 ^ L_var2) & MIN_32) == 0)
+    {
+        if ((L_var_out ^ L_var1) & MIN_32)
+        {
+            L_var_out = (L_var1 < 0) ? MIN_32 : MAX_32;
+        }
+    }
+    return (L_var_out);
 }
 
 /*___________________________________________________________________________
@@ -607,18 +612,19 @@
 |___________________________________________________________________________|
 */
 
+__attribute__((no_sanitize("integer")))
 static_vo Word32 L_sub (Word32 L_var1, Word32 L_var2)
 {
-	Word32 L_var_out;
-	L_var_out = L_var1 - L_var2;
-	if (((L_var1 ^ L_var2) & MIN_32) != 0)
-	{
-		if ((L_var_out ^ L_var1) & MIN_32)
-		{
-			L_var_out = (L_var1 < 0L) ? MIN_32 : MAX_32;
-		}
-	}
-	return (L_var_out);
+    Word32 L_var_out;
+    L_var_out = L_var1 - L_var2;
+    if (((L_var1 ^ L_var2) & MIN_32) != 0)
+    {
+        if ((L_var_out ^ L_var1) & MIN_32)
+        {
+            L_var_out = (L_var1 < 0L) ? MIN_32 : MAX_32;
+        }
+    }
+    return (L_var_out);
 }
 
 
@@ -658,18 +664,18 @@
 
 static_vo Word16 mult_r (Word16 var1, Word16 var2)
 {
-	Word16 var_out;
-	Word32 L_product_arr;
-	L_product_arr = (Word32) var1 *(Word32) var2;       /* product */
-	L_product_arr += (Word32) 0x00004000L;      /* round */
-	L_product_arr &= (Word32) 0xffff8000L;
-	L_product_arr >>= 15;       /* shift */
-	if (L_product_arr & (Word32) 0x00010000L)   /* sign extend when necessary */
-	{
-		L_product_arr |= (Word32) 0xffff0000L;
-	}
-	var_out = saturate (L_product_arr);
-	return (var_out);
+    Word16 var_out;
+    Word32 L_product_arr;
+    L_product_arr = (Word32) var1 *(Word32) var2;       /* product */
+    L_product_arr += (Word32) 0x00004000L;      /* round */
+    L_product_arr &= (Word32) 0xffff8000L;
+    L_product_arr >>= 15;       /* shift */
+    if (L_product_arr & (Word32) 0x00010000L)   /* sign extend when necessary */
+    {
+        L_product_arr |= (Word32) 0xffff0000L;
+    }
+    var_out = saturate (L_product_arr);
+    return (var_out);
 }
 
 /*___________________________________________________________________________
@@ -708,61 +714,61 @@
 
 static_vo Word32 L_shl (Word32 L_var1, Word16 var2)
 {
-	Word32 L_var_out = 0L;
-	if (var2 <= 0)
-	{
-		if (var2 < -32)
-			var2 = -32;
-		L_var_out = (L_var1 >> (Word16)-var2);
-	}
-	else
-	{
-		for (; var2 > 0; var2--)
-		{
-			if (L_var1 > (Word32) 0X3fffffffL)
-			{
-				L_var_out = MAX_32;
-				break;
-			}
-			else
-			{
-				if (L_var1 < (Word32) 0xc0000000L)
-				{
-					//Overflow = 1;
-					L_var_out = MIN_32;
-					break;
-				}
-			}
-			L_var1 *= 2;
-			L_var_out = L_var1;
-		}
-	}
-	return (L_var_out);
+    Word32 L_var_out = 0L;
+    if (var2 <= 0)
+    {
+        if (var2 < -32)
+            var2 = -32;
+        L_var_out = (L_var1 >> (Word16)-var2);
+    }
+    else
+    {
+        for (; var2 > 0; var2--)
+        {
+            if (L_var1 > (Word32) 0X3fffffffL)
+            {
+                L_var_out = MAX_32;
+                break;
+            }
+            else
+            {
+                if (L_var1 < (Word32) 0xc0000000L)
+                {
+                    //Overflow = 1;
+                    L_var_out = MIN_32;
+                    break;
+                }
+            }
+            L_var1 *= 2;
+            L_var_out = L_var1;
+        }
+    }
+    return (L_var_out);
 }
 
 static_vo Word32 L_shl2(Word32 L_var1, Word16 var2)
 {
-	Word32 L_var_out = 0L;
+    Word32 L_var_out = 0L;
 
-	for (; var2 > 0; var2--)
-	{
-		if (L_var1 > (Word32) 0X3fffffffL)
-		{
-			L_var_out = MAX_32;
-			break;
-		}
-		else
-		{
-			if (L_var1 < (Word32) 0xc0000000L)
-			{
-				L_var_out = MIN_32;
-				break;
-			}
-		}
-		L_var1 <<=1 ;
-		L_var_out = L_var1;
-	}
-	return (L_var_out);
+    for (; var2 > 0; var2--)
+    {
+        if (L_var1 > (Word32) 0X3fffffffL)
+        {
+            L_var_out = MAX_32;
+            break;
+        }
+        else
+        {
+            if (L_var1 < (Word32) 0xc0000000L)
+            {
+                L_var_out = MIN_32;
+                break;
+            }
+        }
+        L_var1 <<=1 ;
+        L_var_out = L_var1;
+    }
+    return (L_var_out);
 }
 
 /*___________________________________________________________________________
@@ -801,32 +807,32 @@
 
 static_vo Word32 L_shr (Word32 L_var1, Word16 var2)
 {
-	Word32 L_var_out;
-	if (var2 < 0)
-	{
-		if (var2 < -32)
-			var2 = -32;
-		L_var_out = L_shl2(L_var1, (Word16)-var2);
-	}
-	else
-	{
-		if (var2 >= 31)
-		{
-			L_var_out = (L_var1 < 0L) ? -1 : 0;
-		}
-		else
-		{
-			if (L_var1 < 0)
-			{
-				L_var_out = ~((~L_var1) >> var2);
-			}
-			else
-			{
-				L_var_out = L_var1 >> var2;
-			}
-		}
-	}
-	return (L_var_out);
+    Word32 L_var_out;
+    if (var2 < 0)
+    {
+        if (var2 < -32)
+            var2 = -32;
+        L_var_out = L_shl2(L_var1, (Word16)-var2);
+    }
+    else
+    {
+        if (var2 >= 31)
+        {
+            L_var_out = (L_var1 < 0L) ? -1 : 0;
+        }
+        else
+        {
+            if (L_var1 < 0)
+            {
+                L_var_out = ~((~L_var1) >> var2);
+            }
+            else
+            {
+                L_var_out = L_var1 >> var2;
+            }
+        }
+    }
+    return (L_var_out);
 }
 
 /*___________________________________________________________________________
@@ -873,23 +879,23 @@
 
 static_vo Word32 L_shr_r (Word32 L_var1, Word16 var2)
 {
-	Word32 L_var_out;
-	if (var2 > 31)
-	{
-		L_var_out = 0;
-	}
-	else
-	{
-		L_var_out = L_shr (L_var1, var2);
-		if (var2 > 0)
-		{
-			if ((L_var1 & ((Word32) 1 << (var2 - 1))) != 0)
-			{
-				L_var_out++;
-			}
-		}
-	}
-	return (L_var_out);
+    Word32 L_var_out;
+    if (var2 > 31)
+    {
+        L_var_out = 0;
+    }
+    else
+    {
+        L_var_out = L_shr (L_var1, var2);
+        if (var2 > 0)
+        {
+            if ((L_var1 & ((Word32) 1 << (var2 - 1))) != 0)
+            {
+                L_var_out++;
+            }
+        }
+    }
+    return (L_var_out);
 }
 
 /*___________________________________________________________________________
@@ -927,30 +933,30 @@
 
 static_vo Word16 norm_s (Word16 var1)
 {
-	Word16 var_out = 0;
-	if (var1 == 0)
-	{
-		var_out = 0;
-	}
-	else
-	{
-		if (var1 == -1)
-		{
-			var_out = 15;
-		}
-		else
-		{
-			if (var1 < 0)
-			{
-				var1 = (Word16)~var1;
-			}
-			for (var_out = 0; var1 < 0x4000; var_out++)
-			{
-				var1 <<= 1;
-			}
-		}
-	}
-	return (var_out);
+    Word16 var_out = 0;
+    if (var1 == 0)
+    {
+        var_out = 0;
+    }
+    else
+    {
+        if (var1 == -1)
+        {
+            var_out = 15;
+        }
+        else
+        {
+            if (var1 < 0)
+            {
+                var1 = (Word16)~var1;
+            }
+            for (var_out = 0; var1 < 0x4000; var_out++)
+            {
+                var1 <<= 1;
+            }
+        }
+    }
+    return (var_out);
 }
 
 /*___________________________________________________________________________
@@ -992,47 +998,47 @@
 
 static_vo Word16 div_s (Word16 var1, Word16 var2)
 {
-	Word16 var_out = 0;
-	Word16 iteration;
-	Word32 L_num;
-	Word32 L_denom;
-	if ((var1 < 0) || (var2 < 0))
-	{
-		var_out = MAX_16;
-		return var_out;
-	}
-	if (var2 == 0)
-	{
-		var_out = MAX_16;
-		return var_out;
-	}
-	if (var1 == 0)
-	{
-		var_out = 0;
-	}
-	else
-	{
-		if (var1 == var2)
-		{
-			var_out = MAX_16;
-		}
-		else
-		{
-			L_num = L_deposit_l (var1);
-			L_denom = L_deposit_l(var2);
-			for (iteration = 0; iteration < 15; iteration++)
-			{
-				var_out <<= 1;
-				L_num <<= 1;
-				if (L_num >= L_denom)
-				{
-					L_num -= L_denom;
-					var_out += 1;
-				}
-			}
-		}
-	}
-	return (var_out);
+    Word16 var_out = 0;
+    Word16 iteration;
+    Word32 L_num;
+    Word32 L_denom;
+    if ((var1 < 0) || (var2 < 0))
+    {
+        var_out = MAX_16;
+        return var_out;
+    }
+    if (var2 == 0)
+    {
+        var_out = MAX_16;
+        return var_out;
+    }
+    if (var1 == 0)
+    {
+        var_out = 0;
+    }
+    else
+    {
+        if (var1 == var2)
+        {
+            var_out = MAX_16;
+        }
+        else
+        {
+            L_num = L_deposit_l (var1);
+            L_denom = L_deposit_l(var2);
+            for (iteration = 0; iteration < 15; iteration++)
+            {
+                var_out <<= 1;
+                L_num <<= 1;
+                if (L_num >= L_denom)
+                {
+                    L_num -= L_denom;
+                    var_out += 1;
+                }
+            }
+        }
+    }
+    return (var_out);
 }
 
 /*___________________________________________________________________________
@@ -1070,20 +1076,20 @@
 
 static_vo Word16 norm_l (Word32 L_var1)
 {
-	Word16 var_out = 0;
-	if (L_var1 != 0)
-	{
-		var_out = 31;
-		if (L_var1 != (Word32) 0xffffffffL)
-		{
-			L_var1 ^= (L_var1 >>31);
-			for (var_out = 0; L_var1 < (Word32) 0x40000000L; var_out++)
-			{
-				L_var1 <<= 1;
-			}
-		}
-	}
-	return (var_out);
+    Word16 var_out = 0;
+    if (L_var1 != 0)
+    {
+        var_out = 31;
+        if (L_var1 != (Word32) 0xffffffffL)
+        {
+            L_var1 ^= (L_var1 >>31);
+            for (var_out = 0; L_var1 < (Word32) 0x40000000L; var_out++)
+            {
+                L_var1 <<= 1;
+            }
+        }
+    }
+    return (var_out);
 }
 
 #endif //__BASIC_OP_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/bits.h b/media/libstagefright/codecs/amrwbenc/inc/bits.h
index e880684..ff9c0c1 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/bits.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/bits.h
@@ -18,7 +18,7 @@
 /*--------------------------------------------------------------------------*
 *                         BITS.H                                           *
 *--------------------------------------------------------------------------*
-*       Number of bits for different modes			           *
+*       Number of bits for different modes                                 *
 *--------------------------------------------------------------------------*/
 
 #ifndef __BITS_H__
@@ -52,16 +52,16 @@
 #define RX_FRAME_TYPE (Word16)0x6b20
 
 static const Word16 nb_of_bits[NUM_OF_MODES] = {
-	NBBITS_7k,
-	NBBITS_9k,
-	NBBITS_12k,
-	NBBITS_14k,
-	NBBITS_16k,
-	NBBITS_18k,
-	NBBITS_20k,
-	NBBITS_23k,
-	NBBITS_24k,
-	NBBITS_SID
+    NBBITS_7k,
+    NBBITS_9k,
+    NBBITS_12k,
+    NBBITS_14k,
+    NBBITS_16k,
+    NBBITS_18k,
+    NBBITS_20k,
+    NBBITS_23k,
+    NBBITS_24k,
+    NBBITS_SID
 };
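
Each nb_of_bits entry is the number of speech bits in one 20 ms frame for
the corresponding codec mode, so the bit rate is simply the entry times 50
frames per second. A trivial sketch (the 132-bit / 6.60 kbps figure is from
3GPP TS 26.201, not from this header excerpt):

    #include <assert.h>

    /* Bits per 20 ms frame -> bits per second (50 frames per second). */
    static int mode_bitrate(int bits_per_frame) { return bits_per_frame * 50; }

    int main(void) {
        assert(mode_bitrate(132) == 6600);   /* the 6.60 kbps mode (NBBITS_7k) */
        return 0;
    }
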
 
 /*typedef struct
@@ -74,18 +74,18 @@
 
 //typedef struct
 //{
-//	Word16 prev_ft;
-//	Word16 prev_mode;
+//  Word16 prev_ft;
+//  Word16 prev_mode;
 //} RX_State;
 
 int PackBits(Word16 prms[], Word16 coding_mode, Word16 mode, Coder_State *st);
 
 
 void Parm_serial(
-		Word16 value,                         /* input : parameter value */
-		Word16 no_of_bits,                    /* input : number of bits  */
-		Word16 ** prms
-		);
+        Word16 value,                         /* input : parameter value */
+        Word16 no_of_bits,                    /* input : number of bits  */
+        Word16 ** prms
+        );
 
 
 #endif  //__BITS_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/cod_main.h b/media/libstagefright/codecs/amrwbenc/inc/cod_main.h
index 53ca55e..170981e 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/cod_main.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/cod_main.h
@@ -18,7 +18,7 @@
 /*--------------------------------------------------------------------------*
  *                         COD_MAIN.H                                       *
  *--------------------------------------------------------------------------*
- *       Static memory in the encoder				            *
+ *       Static memory in the encoder                                       *
  *--------------------------------------------------------------------------*/
 #ifndef __COD_MAIN_H__
 #define __COD_MAIN_H__
@@ -79,21 +79,21 @@
     Word16 vad_hist;
     Word16 gain_alpha;
     /*  TX_State structure  */
-	Word16 sid_update_counter;
+    Word16 sid_update_counter;
     Word16 sid_handover_debt;
     Word16 prev_ft;
-	Word16 allow_dtx;
-	/*some input/output buffer parameters */
-	unsigned char       *inputStream;
-	int			        inputSize;
-	VOAMRWBMODE  		mode;
-	VOAMRWBFRAMETYPE	frameType;
-	unsigned short      *outputStream;
-	int			        outputSize;
-	FrameStream         *stream;
-	VO_MEM_OPERATOR     *pvoMemop;
-	VO_MEM_OPERATOR     voMemoprator;
-	VO_PTR              hCheck;
+    Word16 allow_dtx;
+    /* some input/output buffer parameters */
+    unsigned char       *inputStream;
+    int                 inputSize;
+    VOAMRWBMODE         mode;
+    VOAMRWBFRAMETYPE    frameType;
+    unsigned short      *outputStream;
+    int                 outputSize;
+    FrameStream         *stream;
+    VO_MEM_OPERATOR     *pvoMemop;
+    VO_MEM_OPERATOR     voMemoprator;
+    VO_PTR              hCheck;
 } Coder_State;
 
 typedef void* HAMRENC;
diff --git a/media/libstagefright/codecs/amrwbenc/inc/dtx.h b/media/libstagefright/codecs/amrwbenc/inc/dtx.h
index 0bdda67..82a9bf4 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/dtx.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/dtx.h
@@ -16,9 +16,9 @@
 
 
 /*--------------------------------------------------------------------------*
- *                         DTX.H					    *
+ *                         DTX.H                                            *
  *--------------------------------------------------------------------------*
- *       Static memory, constants and frametypes for the DTX 		    *
+ *       Static memory, constants and frametypes for the DTX                *
  *--------------------------------------------------------------------------*/
 
 #ifndef __DTX_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/log2.h b/media/libstagefright/codecs/amrwbenc/inc/log2.h
index b065eb4..3d9a6c4 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/log2.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/log2.h
@@ -45,17 +45,17 @@
 ********************************************************************************
 */
 void Log2 (
-		Word32 L_x,        /* (i) : input value                                 */
-		Word16 *exponent,  /* (o) : Integer part of Log2.   (range: 0<=val<=30) */
-		Word16 *fraction   /* (o) : Fractional part of Log2. (range: 0<=val<1)*/
-	  );
+        Word32 L_x,        /* (i) : input value                                 */
+        Word16 *exponent,  /* (o) : Integer part of Log2.   (range: 0<=val<=30) */
+        Word16 *fraction   /* (o) : Fractional part of Log2. (range: 0<=val<1)*/
+      );
 
 void Log2_norm (
-		Word32 L_x,         /* (i) : input value (normalized)                    */
-		Word16 exp,         /* (i) : norm_l (L_x)                                */
-		Word16 *exponent,   /* (o) : Integer part of Log2.   (range: 0<=val<=30) */
-		Word16 *fraction    /* (o) : Fractional part of Log2. (range: 0<=val<1)  */
-	       );
+        Word32 L_x,         /* (i) : input value (normalized)                    */
+        Word16 exp,         /* (i) : norm_l (L_x)                                */
+        Word16 *exponent,   /* (o) : Integer part of Log2.   (range: 0<=val<=30) */
+        Word16 *fraction    /* (o) : Fractional part of Log2. (range: 0<=val<1)  */
+           );
 
 #endif  //__LOG2_H__
 
diff --git a/media/libstagefright/codecs/amrwbenc/inc/main.h b/media/libstagefright/codecs/amrwbenc/inc/main.h
index 3a6f963..adef2df 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/main.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/main.h
@@ -17,9 +17,9 @@
 
 
 /*--------------------------------------------------------------------------*
- *                         MAIN.H	                                    *
+ *                         MAIN.H                                           *
  *--------------------------------------------------------------------------*
- *       Main functions							    *
+ *       Main functions                                                     *
  *--------------------------------------------------------------------------*/
 
 #ifndef __MAIN_H__
diff --git a/media/libstagefright/codecs/amrwbenc/inc/math_op.h b/media/libstagefright/codecs/amrwbenc/inc/math_op.h
index 7b6196b..c3c00bc 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/math_op.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/math_op.h
@@ -16,40 +16,40 @@
 
 
 /*--------------------------------------------------------------------------*
- *                         MATH_OP.H	                                    *
+ *                         MATH_OP.H                                        *
  *--------------------------------------------------------------------------*
- *       Mathematical operations					    *
+ *       Mathematical operations                                            *
  *--------------------------------------------------------------------------*/
 
 #ifndef __MATH_OP_H__
 #define __MATH_OP_H__
 
 Word32 Isqrt(                              /* (o) Q31 : output value (range: 0<=val<1)         */
-		Word32 L_x                            /* (i) Q0  : input value  (range: 0<=val<=7fffffff) */
-	    );
+        Word32 L_x                            /* (i) Q0  : input value  (range: 0<=val<=7fffffff) */
+        );
 
 void Isqrt_n(
-		Word32 * frac,                        /* (i/o) Q31: normalized value (1.0 < frac <= 0.5) */
-		Word16 * exp                          /* (i/o)    : exponent (value = frac x 2^exponent) */
-	    );
+        Word32 * frac,                        /* (i/o) Q31: normalized value (1.0 < frac <= 0.5) */
+        Word16 * exp                          /* (i/o)    : exponent (value = frac x 2^exponent) */
+        );
 
 Word32 Pow2(                               /* (o) Q0  : result       (range: 0<=val<=0x7fffffff) */
-		Word16 exponant,                      /* (i) Q0  : Integer part.      (range: 0<=val<=30)   */
-		Word16 fraction                       /* (i) Q15 : Fractionnal part.  (range: 0.0<=val<1.0) */
-	   );
+        Word16 exponant,                      /* (i) Q0  : Integer part.      (range: 0<=val<=30)   */
+        Word16 fraction                       /* (i) Q15 : Fractional part.   (range: 0.0<=val<1.0) */
+       );
 
 Word32 Dot_product12(                      /* (o) Q31: normalized result (1 < val <= -1) */
-		Word16 x[],                           /* (i) 12bits: x vector                       */
-		Word16 y[],                           /* (i) 12bits: y vector                       */
-		Word16 lg,                            /* (i)    : vector length                     */
-		Word16 * exp                          /* (o)    : exponent of result (0..+30)       */
-		);
+        Word16 x[],                           /* (i) 12bits: x vector                       */
+        Word16 y[],                           /* (i) 12bits: y vector                       */
+        Word16 lg,                            /* (i)    : vector length                     */
+        Word16 * exp                          /* (o)    : exponent of result (0..+30)       */
+        );
 
 Word32 Dot_product12_asm(                      /* (o) Q31: normalized result (1 < val <= -1) */
-		Word16 x[],                           /* (i) 12bits: x vector                       */
-		Word16 y[],                           /* (i) 12bits: y vector                       */
-		Word16 lg,                            /* (i)    : vector length                     */
-		Word16 * exp                          /* (o)    : exponent of result (0..+30)       */
-		);
+        Word16 x[],                           /* (i) 12bits: x vector                       */
+        Word16 y[],                           /* (i) 12bits: y vector                       */
+        Word16 lg,                            /* (i)    : vector length                     */
+        Word16 * exp                          /* (o)    : exponent of result (0..+30)       */
+        );
 #endif //__MATH_OP_H__
 
diff --git a/media/libstagefright/codecs/amrwbenc/inc/mem_align.h b/media/libstagefright/codecs/amrwbenc/inc/mem_align.h
index 442786a..2ae5a6c 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/mem_align.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/mem_align.h
@@ -14,9 +14,9 @@
  ** limitations under the License.
  */
 /*******************************************************************************
-	File:		mem_align.h
+    File:       mem_align.h
 
-	Content:	Memory alloc alignments functions
+    Content:    Memory alloc alignment functions
 
 *******************************************************************************/
 
@@ -29,7 +29,7 @@
 extern void *mem_malloc(VO_MEM_OPERATOR *pMemop, unsigned int size, unsigned char alignment, unsigned int CodecID);
 extern void mem_free(VO_MEM_OPERATOR *pMemop, void *mem_ptr, unsigned int CodecID);
 
-#endif	/* __VO_MEM_ALIGN_H__ */
+#endif  /* __VO_MEM_ALIGN_H__ */
 
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/inc/p_med_o.h b/media/libstagefright/codecs/amrwbenc/inc/p_med_o.h
index 4a13f16..77487ed 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/p_med_o.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/p_med_o.h
@@ -17,36 +17,36 @@
 /*--------------------------------------------------------------------------*
  *                         P_MED_O.H                                        *
  *--------------------------------------------------------------------------*
- *       Median open-loop lag search				            *
+ *       Median open-loop lag search                                        *
  *--------------------------------------------------------------------------*/
 
 #ifndef __P_MED_O_H__
 #define __P_MED_O_H__
 
 Word16 Pitch_med_ol(                       /* output: open loop pitch lag                        */
-		Word16 wsp[],                         /* input : signal used to compute the open loop pitch */
-		/* wsp[-pit_max] to wsp[-1] should be known   */
-		Word16 L_min,                         /* input : minimum pitch lag                          */
-		Word16 L_max,                         /* input : maximum pitch lag                          */
-		Word16 L_frame,                       /* input : length of frame to compute pitch           */
-		Word16 L_0,                           /* input : old_ open-loop pitch                       */
-		Word16 * gain,                        /* output: normalize correlation of hp_wsp for the Lag */
-		Word16 * hp_wsp_mem,                  /* i:o   : memory of the hypass filter for hp_wsp[] (lg=9)   */
-		Word16 * old_hp_wsp,                  /* i:o   : hypass wsp[]                               */
-		Word16 wght_flg                       /* input : is weighting function used                 */
-		);
+        Word16 wsp[],                         /* input : signal used to compute the open loop pitch */
+        /* wsp[-pit_max] to wsp[-1] should be known   */
+        Word16 L_min,                         /* input : minimum pitch lag                          */
+        Word16 L_max,                         /* input : maximum pitch lag                          */
+        Word16 L_frame,                       /* input : length of frame to compute pitch           */
+        Word16 L_0,                           /* input : old open-loop pitch                        */
+        Word16 * gain,                        /* output: normalized correlation of hp_wsp for the lag */
+        Word16 * hp_wsp_mem,                  /* i:o   : memory of the highpass filter for hp_wsp[] (lg=9) */
+        Word16 * old_hp_wsp,                  /* i:o   : highpass wsp[]                             */
+        Word16 wght_flg                       /* input : is weighting function used                 */
+        );
 
 Word16 Med_olag(                           /* output : median of  5 previous open-loop lags       */
-		Word16 prev_ol_lag,                   /* input  : previous open-loop lag                     */
-		Word16 old_ol_lag[5]
-	       );
+        Word16 prev_ol_lag,                   /* input  : previous open-loop lag                     */
+        Word16 old_ol_lag[5]
+           );
 
 void Hp_wsp(
-		Word16 wsp[],                         /* i   : wsp[]  signal       */
-		Word16 hp_wsp[],                      /* o   : hypass wsp[]        */
-		Word16 lg,                            /* i   : lenght of signal    */
-		Word16 mem[]                          /* i/o : filter memory [9]   */
-	   );
+        Word16 wsp[],                         /* i   : wsp[]  signal       */
+        Word16 hp_wsp[],                      /* o   : highpass wsp[]      */
+        Word16 lg,                            /* i   : length of signal    */
+        Word16 mem[]                          /* i/o : filter memory [9]   */
+       );
 
 #endif  //__P_MED_O_H__
 
diff --git a/media/libstagefright/codecs/amrwbenc/inc/q_pulse.h b/media/libstagefright/codecs/amrwbenc/inc/q_pulse.h
index b5d5280..67140fc 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/q_pulse.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/q_pulse.h
@@ -19,7 +19,7 @@
 /*--------------------------------------------------------------------------*
  *                         Q_PULSE.H                                        *
  *--------------------------------------------------------------------------*
- * Coding and decoding of algebraic codebook			            *
+ * Coding and decoding of algebraic codebook                                *
  *--------------------------------------------------------------------------*/
 
 #ifndef  __Q_PULSE_H__
@@ -28,38 +28,38 @@
 #include "typedef.h"
 
 Word32 quant_1p_N1(                        /* (o) return (N+1) bits           */
-		Word16 pos,                           /* (i) position of the pulse       */
-		Word16 N);                            /* (i) number of bits for position */
+        Word16 pos,                           /* (i) position of the pulse       */
+        Word16 N);                            /* (i) number of bits for position */
 
 Word32 quant_2p_2N1(                       /* (o) return (2*N)+1 bits         */
-		Word16 pos1,                          /* (i) position of the pulse 1     */
-		Word16 pos2,                          /* (i) position of the pulse 2     */
-		Word16 N);                            /* (i) number of bits for position */
+        Word16 pos1,                          /* (i) position of the pulse 1     */
+        Word16 pos2,                          /* (i) position of the pulse 2     */
+        Word16 N);                            /* (i) number of bits for position */
 
 Word32 quant_3p_3N1(                       /* (o) return (3*N)+1 bits         */
-		Word16 pos1,                          /* (i) position of the pulse 1     */
-		Word16 pos2,                          /* (i) position of the pulse 2     */
-		Word16 pos3,                          /* (i) position of the pulse 3     */
-		Word16 N);                            /* (i) number of bits for position */
+        Word16 pos1,                          /* (i) position of the pulse 1     */
+        Word16 pos2,                          /* (i) position of the pulse 2     */
+        Word16 pos3,                          /* (i) position of the pulse 3     */
+        Word16 N);                            /* (i) number of bits for position */
 
 Word32 quant_4p_4N1(                       /* (o) return (4*N)+1 bits         */
-		Word16 pos1,                          /* (i) position of the pulse 1     */
-		Word16 pos2,                          /* (i) position of the pulse 2     */
-		Word16 pos3,                          /* (i) position of the pulse 3     */
-		Word16 pos4,                          /* (i) position of the pulse 4     */
-		Word16 N);                            /* (i) number of bits for position */
+        Word16 pos1,                          /* (i) position of the pulse 1     */
+        Word16 pos2,                          /* (i) position of the pulse 2     */
+        Word16 pos3,                          /* (i) position of the pulse 3     */
+        Word16 pos4,                          /* (i) position of the pulse 4     */
+        Word16 N);                            /* (i) number of bits for position */
 
 Word32 quant_4p_4N(                        /* (o) return 4*N bits             */
-		Word16 pos[],                         /* (i) position of the pulse 1..4  */
-		Word16 N);                            /* (i) number of bits for position */
+        Word16 pos[],                         /* (i) position of the pulse 1..4  */
+        Word16 N);                            /* (i) number of bits for position */
 
 Word32 quant_5p_5N(                        /* (o) return 5*N bits             */
-		Word16 pos[],                         /* (i) position of the pulse 1..5  */
-		Word16 N);                            /* (i) number of bits for position */
+        Word16 pos[],                         /* (i) position of the pulse 1..5  */
+        Word16 N);                            /* (i) number of bits for position */
 
 Word32 quant_6p_6N_2(                      /* (o) return (6*N)-2 bits         */
-		Word16 pos[],                         /* (i) position of the pulse 1..6  */
-		Word16 N);                            /* (i) number of bits for position */
+        Word16 pos[],                         /* (i) position of the pulse 1..6  */
+        Word16 N);                            /* (i) number of bits for position */
 
 
 #endif //__Q_PULSE_H__
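The simplest quantizer above, quant_1p_N1, spends N bits on a pulse position
and one more on its sign, hence the (N+1)-bit return value. A hedged sketch of
that idea (the bit layout below is illustrative; the codec defines its own
packing, and quant_1p_sketch is not codec code):

    #include <stdint.h>

    /* Sketch: one signed pulse -> N position bits plus a sign bit.
     * Assumes 0 < N < 31. Illustrative layout only. */
    static int32_t quant_1p_sketch(int16_t pos, int sign_negative, int16_t N)
    {
        int32_t index = pos & ((1 << N) - 1);  /* N position bits */
        if (sign_negative)
            index |= (int32_t)1 << N;          /* sign in the top bit */
        return index;
    }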
diff --git a/media/libstagefright/codecs/amrwbenc/inc/stream.h b/media/libstagefright/codecs/amrwbenc/inc/stream.h
index 4c1d0f0..ec1a700 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/stream.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/stream.h
@@ -17,7 +17,7 @@
 
 
 /***********************************************************************
-File:		stream.h
+File:       stream.h
 
 Contains:       VOME API Buffer Operator Implement Header
 
@@ -28,16 +28,16 @@
 #include "voMem.h"
 #define Frame_Maxsize  1024 * 2  //Work Buffer 10K
 #define Frame_MaxByte  640        //AMR_WB Encoder one frame 320 samples = 640 Bytes
-#define MIN(a,b)	 ((a) < (b)? (a) : (b))
+#define MIN(a,b)     ((a) < (b)? (a) : (b))
 
 typedef struct{
-	unsigned char *set_ptr;
-	unsigned char *frame_ptr;
-	unsigned char *frame_ptr_bk;
-	int  set_len;
-	int  framebuffer_len;
-	int  frame_storelen;
-	int  used_len;
+    unsigned char *set_ptr;
+    unsigned char *frame_ptr;
+    unsigned char *frame_ptr_bk;
+    int  set_len;
+    int  framebuffer_len;
+    int  frame_storelen;
+    int  used_len;
 }FrameStream;
 
 void voAWB_UpdateFrameBuffer(FrameStream *stream, VO_MEM_OPERATOR *pMemOP);
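One caution about the MIN macro defined above: as a function-like macro it
evaluates the selected argument twice, so arguments with side effects
misbehave. A small self-contained demonstration:

    #include <stdio.h>

    #define MIN(a,b)     ((a) < (b)? (a) : (b))

    int main(void)
    {
        int a = 1, b = 5;
        int m = MIN(a++, b);         /* expands to ((a++) < (b) ? (a++) : (b)) */
        printf("m=%d a=%d\n", m, a); /* prints "m=2 a=3", not "m=1 a=2" */
        return 0;
    }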
diff --git a/media/libstagefright/codecs/amrwbenc/inc/wb_vad.h b/media/libstagefright/codecs/amrwbenc/inc/wb_vad.h
index 6822f48..9a9af4f 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/wb_vad.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/wb_vad.h
@@ -37,28 +37,28 @@
 
 typedef struct
 {
-	Word16 bckr_est[COMPLEN];              /* background noise estimate                */
-	Word16 ave_level[COMPLEN];             /* averaged input components for stationary */
-	/* estimation                               */
-	Word16 old_level[COMPLEN];             /* input levels of the previous frame       */
-	Word16 sub_level[COMPLEN];             /* input levels calculated at the end of a frame (lookahead)  */
-	Word16 a_data5[F_5TH_CNT][2];          /* memory for the filter bank               */
-	Word16 a_data3[F_3TH_CNT];             /* memory for the filter bank               */
+    Word16 bckr_est[COMPLEN];              /* background noise estimate                */
+    Word16 ave_level[COMPLEN];             /* averaged input components for stationarity */
+    /* estimation                               */
+    Word16 old_level[COMPLEN];             /* input levels of the previous frame       */
+    Word16 sub_level[COMPLEN];             /* input levels calculated at the end of a frame (lookahead)  */
+    Word16 a_data5[F_5TH_CNT][2];          /* memory for the filter bank               */
+    Word16 a_data3[F_3TH_CNT];             /* memory for the filter bank               */
 
-	Word16 burst_count;                    /* counts length of a speech burst          */
-	Word16 hang_count;                     /* hangover counter                         */
-	Word16 stat_count;                     /* stationary counter                       */
+    Word16 burst_count;                    /* counts length of a speech burst          */
+    Word16 hang_count;                     /* hangover counter                         */
+    Word16 stat_count;                     /* stationary counter                       */
 
-	/* Note that each of the following two variables holds 15 flags. Each flag reserves 1 bit of the
-	 * variable. The newest flag is in the bit 15 (assuming that LSB is bit 1 and MSB is bit 16). */
-	Word16 vadreg;                         /* flags for intermediate VAD decisions     */
-	Word16 tone_flag;                      /* tone detection flags                     */
+    /* Note that each of the following two variables holds 15 flags. Each flag reserves 1 bit of the
+     * variable. The newest flag is in bit 15 (assuming that the LSB is bit 1 and the MSB is bit 16). */
+    Word16 vadreg;                         /* flags for intermediate VAD decisions     */
+    Word16 tone_flag;                      /* tone detection flags                     */
 
-	Word16 sp_est_cnt;                     /* counter for speech level estimation      */
-	Word16 sp_max;                         /* maximum level                            */
-	Word16 sp_max_cnt;                     /* counts frames that contains speech       */
-	Word16 speech_level;                   /* estimated speech level                   */
-	Word32 prev_pow_sum;                   /* power of previous frame                  */
+    Word16 sp_est_cnt;                     /* counter for speech level estimation      */
+    Word16 sp_max;                         /* maximum level                            */
+    Word16 sp_max_cnt;                     /* counts frames that contain speech        */
+    Word16 speech_level;                   /* estimated speech level                   */
+    Word32 prev_pow_sum;                   /* power of previous frame                  */
 
 } VadVars;
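The vadreg/tone_flag comment above describes a 15-bit shift register of
per-frame decisions, newest in bit 15. One plausible update step matching that
description (an assumed convention; push_flag is illustrative, not codec code):

    #include <stdint.h>

    /* Sketch of the 15-flag register described above: age the flags by
     * one position and put the newest decision in bit 15 (LSB = bit 1). */
    static int16_t push_flag(int16_t reg, int new_flag)
    {
        reg = (int16_t)(((uint16_t)reg >> 1) & 0x3FFF); /* drop the oldest flag */
        if (new_flag)
            reg |= 0x4000;                              /* newest flag -> bit 15 */
        return reg;
    }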
 
diff --git a/media/libstagefright/codecs/amrwbenc/inc/wb_vad_c.h b/media/libstagefright/codecs/amrwbenc/inc/wb_vad_c.h
index 04fd318..00b1779 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/wb_vad_c.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/wb_vad_c.h
@@ -16,9 +16,9 @@
 
 
 /*-------------------------------------------------------------------*
- *                         WB_VAD_C.H				     *
+ *                         WB_VAD_C.H                                *
  *-------------------------------------------------------------------*
- * Constants for Voice Activity Detection.			     *
+ * Constants for Voice Activity Detection.                           *
  *-------------------------------------------------------------------*/
 
 #ifndef __WB_VAD_C_H__
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Deemph_32_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Deemph_32_opt.s
index 282db92..42ebc32 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Deemph_32_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Deemph_32_opt.s
@@ -99,6 +99,6 @@
            LDMFD   	r13!, {r4 - r12, r15}
 
 	   @ENDP
-	   .END
+	   .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Dot_p_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Dot_p_opt.s
index 4aa317e..3f060ff 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Dot_p_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Dot_p_opt.s
@@ -75,6 +75,6 @@
 
           LDMFD   	    r13!, {r4 - r12, r15}
           @ENDFUNC
-          .END
+          .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s
index f23b5a0..9cad479 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Filt_6k_7k_opt.s
@@ -183,6 +183,6 @@
 Lable1:
           .word   		fir_6k_7k-Lable1
           @ENDFUNC
-          .END
+          .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Norm_Corr_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Norm_Corr_opt.s
index 49bdc2b..ffedbde 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Norm_Corr_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Norm_Corr_opt.s
@@ -226,6 +226,6 @@
         ADD            r13, r13, #voSTACK
         LDMFD          r13!, {r4 - r12, r15}
 
-        .END
+        .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Syn_filt_32_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Syn_filt_32_opt.s
index 3f4930c..9743b9e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Syn_filt_32_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/Syn_filt_32_opt.s
@@ -221,6 +221,6 @@
 
           LDMFD   	    r13!, {r4 - r12, r15}
           @ENDFUNC
-          .END
+          .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/convolve_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/convolve_opt.s
index 71bb532..cd75179 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/convolve_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/convolve_opt.s
@@ -181,6 +181,6 @@
         LDMFD      r13!, {r4 - r12, r15}
 
         @ENDFUNC
-        .END
+        .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/cor_h_vec_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/cor_h_vec_opt.s
index 2d4c7cc..eedccc7 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/cor_h_vec_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/cor_h_vec_opt.s
@@ -143,7 +143,7 @@
          LDMFD         r13!, {r4 - r12, r15}
 
          @ENDFUNC
-         .END
+         .end
 
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s
index deb7efc..60c2a47 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/pred_lt4_1_opt.s
@@ -45,7 +45,8 @@
          SUBLT     r5, r5, #2                         @x--
          SUB       r5, r5, #30                        @x -= 15
          RSB       r4, r2, #3                         @k = 3 - frac
-         ADRL      r8, Table
+         ADR       r8, Table
+         NOP                      @ space for fixed-up relative address of ADR
          LDR       r6, [r8]
          ADD       r6, r8
 	 MOV       r8, r4, LSL #6
@@ -456,7 +457,7 @@
 Table:
          .word       inter4_2-Table
 	 @ENDFUNC
-	 .END
+	 .end
 
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/residu_asm_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/residu_asm_opt.s
index 5ff0964..d71d790 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/residu_asm_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/residu_asm_opt.s
@@ -220,7 +220,7 @@
 	LDMFD	r13!, {r4 -r12,pc}
 
         @ENDFUNC
-        .END
+        .end
 
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/scale_sig_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/scale_sig_opt.s
index b300224..e8802f5 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/scale_sig_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/scale_sig_opt.s
@@ -67,7 +67,7 @@
          LDMFD         r13!, {r4 - r12, r15}
 
          @ENDFUNC
-         .END
+         .end
 
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/syn_filt_opt.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/syn_filt_opt.s
index 0c287a4..2a1e0d7 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/syn_filt_opt.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV5E/syn_filt_opt.s
@@ -233,6 +233,6 @@
           ADD           r13, r13, #700
           LDMFD   	r13!, {r4 - r12, r15}
           @ENDFUNC
-          .END
+          .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Deemph_32_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Deemph_32_neon.s
index 1d5893f..91feea0 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Deemph_32_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Deemph_32_neon.s
@@ -98,5 +98,5 @@
 
            LDMFD   	r13!, {r4 - r12, r15}
 
-	   .END
+	   .end
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Dot_p_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Dot_p_neon.s
index 8230944..7149a49 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Dot_p_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Dot_p_neon.s
@@ -123,5 +123,5 @@
 
           LDMFD   	    r13!, {r4 - r12, r15}
 
-          .END
+          .end
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s
index 8df0caa..e0f992f 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Filt_6k_7k_neon.s
@@ -226,6 +226,6 @@
 Lable1:
           .word   		fir_6k_7k-Lable1
           @ENDFUNC
-          .END
+          .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Norm_Corr_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Norm_Corr_neon.s
index 4263cd4..28e6d6c 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Norm_Corr_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Norm_Corr_neon.s
@@ -265,6 +265,6 @@
         ADD            r13, r13, #voSTACK
         LDMFD          r13!, {r4 - r12, r15}
 
-        .END
+        .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Syn_filt_32_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Syn_filt_32_neon.s
index e786dde..9687431 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Syn_filt_32_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/Syn_filt_32_neon.s
@@ -128,6 +128,6 @@
 
           LDMFD   	    r13!, {r4 - r12, r15}
           @ENDFUNC
-          .END
+          .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/convolve_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/convolve_neon.s
index 8efa9fb..9fb3a6e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/convolve_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/convolve_neon.s
@@ -174,5 +174,5 @@
         LDMFD      r13!, {r4 - r12, r15}
 
         @ENDFUNC
-        .END
+        .end
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/cor_h_vec_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/cor_h_vec_neon.s
index 8904289..a4deda3 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/cor_h_vec_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/cor_h_vec_neon.s
@@ -143,7 +143,7 @@
 the_end:
              LDMFD         r13!, {r4 - r12, r15}
 
-	     .END
+	     .end
 
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s
index 67be1ed..f8b634f 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/pred_lt4_1_neon.s
@@ -99,5 +99,5 @@
 Lable1:
           .word   	inter4_2-Lable1
           @ENDFUNC
-          .END
+          .end
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/residu_asm_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/residu_asm_neon.s
index 394fa83..bc3d780 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/residu_asm_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/residu_asm_neon.s
@@ -122,6 +122,6 @@
         LDMFD      r13!, {r4 - r12, r15}
 
         @ENDFUNC
-        .END
+        .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/scale_sig_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/scale_sig_neon.s
index e45daac..89c0572 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/scale_sig_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/scale_sig_neon.s
@@ -133,6 +133,6 @@
 
           LDMFD   	r13!, {r4 - r12, r15}
           @ENDFUNC
-          .END
+          .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/syn_filt_neon.s b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/syn_filt_neon.s
index 5731bdb..029560e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/syn_filt_neon.s
+++ b/media/libstagefright/codecs/amrwbenc/src/asm/ARMV7/syn_filt_neon.s
@@ -101,6 +101,6 @@
           ADD           r13, r13, #700
           LDMFD   	r13!, {r4 - r12, r15}
           @ENDFUNC
-          .END
+          .end
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/autocorr.c b/media/libstagefright/codecs/amrwbenc/src/autocorr.c
index 0b2ea89..3ea53f7 100644
--- a/media/libstagefright/codecs/amrwbenc/src/autocorr.c
+++ b/media/libstagefright/codecs/amrwbenc/src/autocorr.c
@@ -31,100 +31,100 @@
 #define UNUSED(x) (void)(x)
 
 void Autocorr(
-		Word16 x[],                           /* (i)    : Input signal                      */
-		Word16 m,                             /* (i)    : LPC order                         */
-		Word16 r_h[],                         /* (o) Q15: Autocorrelations  (msb)           */
-		Word16 r_l[]                          /* (o)    : Autocorrelations  (lsb)           */
-	     )
+        Word16 x[],                           /* (i)    : Input signal                      */
+        Word16 m,                             /* (i)    : LPC order                         */
+        Word16 r_h[],                         /* (o) Q15: Autocorrelations  (msb)           */
+        Word16 r_l[]                          /* (o)    : Autocorrelations  (lsb)           */
+         )
 {
-	Word32 i, norm, shift;
-	Word16 y[L_WINDOW];
-	Word32 L_sum, L_sum1, L_tmp, F_LEN;
-	Word16 *p1,*p2,*p3;
-	const Word16 *p4;
+    Word32 i, norm, shift;
+    Word16 y[L_WINDOW];
+    Word32 L_sum, L_sum1, L_tmp, F_LEN;
+    Word16 *p1,*p2,*p3;
+    const Word16 *p4;
         UNUSED(m);
 
-	/* Windowing of signal */
-	p1 = x;
-	p4 = vo_window;
-	p3 = y;
+    /* Windowing of signal */
+    p1 = x;
+    p4 = vo_window;
+    p3 = y;
 
-	for (i = 0; i < L_WINDOW; i+=4)
-	{
-		*p3++ = vo_mult_r((*p1++), (*p4++));
-		*p3++ = vo_mult_r((*p1++), (*p4++));
-		*p3++ = vo_mult_r((*p1++), (*p4++));
-		*p3++ = vo_mult_r((*p1++), (*p4++));
-	}
+    for (i = 0; i < L_WINDOW; i+=4)
+    {
+        *p3++ = vo_mult_r((*p1++), (*p4++));
+        *p3++ = vo_mult_r((*p1++), (*p4++));
+        *p3++ = vo_mult_r((*p1++), (*p4++));
+        *p3++ = vo_mult_r((*p1++), (*p4++));
+    }
 
-	/* calculate energy of signal */
-	L_sum = vo_L_deposit_h(16);               /* sqrt(256), avoid overflow after rounding */
-	for (i = 0; i < L_WINDOW; i++)
-	{
-		L_tmp = vo_L_mult(y[i], y[i]);
-		L_tmp = (L_tmp >> 8);
-		L_sum += L_tmp;
-	}
+    /* calculate energy of signal */
+    L_sum = vo_L_deposit_h(16);               /* sqrt(256), avoid overflow after rounding */
+    for (i = 0; i < L_WINDOW; i++)
+    {
+        L_tmp = vo_L_mult(y[i], y[i]);
+        L_tmp = (L_tmp >> 8);
+        L_sum += L_tmp;
+    }
 
-	/* scale signal to avoid overflow in autocorrelation */
-	norm = norm_l(L_sum);
-	shift = 4 - (norm >> 1);
-	if(shift > 0)
-	{
-		p1 = y;
-		for (i = 0; i < L_WINDOW; i+=4)
-		{
-			*p1 = vo_shr_r(*p1, shift);
-			p1++;
-			*p1 = vo_shr_r(*p1, shift);
-			p1++;
-			*p1 = vo_shr_r(*p1, shift);
-			p1++;
-			*p1 = vo_shr_r(*p1, shift);
-			p1++;
-		}
-	}
+    /* scale signal to avoid overflow in autocorrelation */
+    norm = norm_l(L_sum);
+    shift = 4 - (norm >> 1);
+    if(shift > 0)
+    {
+        p1 = y;
+        for (i = 0; i < L_WINDOW; i+=4)
+        {
+            *p1 = vo_shr_r(*p1, shift);
+            p1++;
+            *p1 = vo_shr_r(*p1, shift);
+            p1++;
+            *p1 = vo_shr_r(*p1, shift);
+            p1++;
+            *p1 = vo_shr_r(*p1, shift);
+            p1++;
+        }
+    }
 
-	/* Compute and normalize r[0] */
-	L_sum = 1;
-	for (i = 0; i < L_WINDOW; i+=4)
-	{
-		L_sum += vo_L_mult(y[i], y[i]);
-		L_sum += vo_L_mult(y[i+1], y[i+1]);
-		L_sum += vo_L_mult(y[i+2], y[i+2]);
-		L_sum += vo_L_mult(y[i+3], y[i+3]);
-	}
+    /* Compute and normalize r[0] */
+    L_sum = 1;
+    for (i = 0; i < L_WINDOW; i+=4)
+    {
+        L_sum += vo_L_mult(y[i], y[i]);
+        L_sum += vo_L_mult(y[i+1], y[i+1]);
+        L_sum += vo_L_mult(y[i+2], y[i+2]);
+        L_sum += vo_L_mult(y[i+3], y[i+3]);
+    }
 
-	norm = norm_l(L_sum);
-	L_sum = (L_sum << norm);
+    norm = norm_l(L_sum);
+    L_sum = (L_sum << norm);
 
-	r_h[0] = L_sum >> 16;
-	r_l[0] = (L_sum & 0xffff)>>1;
+    r_h[0] = L_sum >> 16;
+    r_l[0] = (L_sum & 0xffff)>>1;
 
-	/* Compute r[1] to r[m] */
-	for (i = 1; i <= 8; i++)
-	{
-		L_sum1 = 0;
-		L_sum = 0;
-		F_LEN = (Word32)(L_WINDOW - 2*i);
-		p1 = y;
-		p2 = y + (2*i)-1;
-		do{
-			L_sum1 += *p1 * *p2++;
-			L_sum += *p1++ * *p2;
-		}while(--F_LEN!=0);
+    /* Compute r[1] to r[m] */
+    for (i = 1; i <= 8; i++)
+    {
+        L_sum1 = 0;
+        L_sum = 0;
+        F_LEN = (Word32)(L_WINDOW - 2*i);
+        p1 = y;
+        p2 = y + (2*i)-1;
+        do{
+            L_sum1 += *p1 * *p2++;
+            L_sum += *p1++ * *p2;
+        }while(--F_LEN!=0);
 
-		L_sum1 += *p1 * *p2++;
+        L_sum1 += *p1 * *p2++;
 
-		L_sum1 = L_sum1<<norm;
-		L_sum = L_sum<<norm;
+        L_sum1 = L_sum1<<norm;
+        L_sum = L_sum<<norm;
 
-		r_h[(2*i)-1] = L_sum1 >> 15;
-		r_l[(2*i)-1] = L_sum1 & 0x00007fff;
-		r_h[(2*i)] = L_sum >> 15;
-		r_l[(2*i)] = L_sum & 0x00007fff;
-	}
-	return;
+        r_h[(2*i)-1] = L_sum1 >> 15;
+        r_l[(2*i)-1] = L_sum1 & 0x00007fff;
+        r_h[(2*i)] = L_sum >> 15;
+        r_l[(2*i)] = L_sum & 0x00007fff;
+    }
+    return;
 }
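Stripped of the fixed-point scaling and the r_h/r_l high/low-word split,
Autocorr above computes a windowed autocorrelation. A floating-point reference
of the same computation (illustrative; autocorr_ref is not part of the codec):

    #include <stddef.h>

    /* Floating-point reference for Autocorr: window the signal, then
     * r[k] = sum_{n=k}^{L-1} y[n]*y[n-k]. Assumes L <= 512 and that
     * r[] holds order+1 entries. Sketch only. */
    static void autocorr_ref(const float *x, const float *w,
                             float *r, size_t L, size_t order)
    {
        float y[512];
        size_t n, k;
        for (n = 0; n < L; n++)
            y[n] = w[n] * x[n];                /* windowing */
        for (k = 0; k <= order; k++) {
            float acc = 0.0f;
            for (n = k; n < L; n++)
                acc += y[n] * y[n - k];        /* lag-k correlation */
            r[k] = acc;
        }
    }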
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/az_isp.c b/media/libstagefright/codecs/amrwbenc/src/az_isp.c
index 43db27a..d7074f0 100644
--- a/media/libstagefright/codecs/amrwbenc/src/az_isp.c
+++ b/media/libstagefright/codecs/amrwbenc/src/az_isp.c
@@ -58,138 +58,138 @@
 static __inline Word16 Chebps2(Word16 x, Word16 f[], Word32 n);
 
 void Az_isp(
-		Word16 a[],                           /* (i) Q12 : predictor coefficients                 */
-		Word16 isp[],                         /* (o) Q15 : Immittance spectral pairs              */
-		Word16 old_isp[]                      /* (i)     : old isp[] (in case not found M roots)  */
-	   )
+        Word16 a[],                           /* (i) Q12 : predictor coefficients                 */
+        Word16 isp[],                         /* (o) Q15 : Immittance spectral pairs              */
+        Word16 old_isp[]                      /* (i)     : old isp[] (in case not found M roots)  */
+       )
 {
-	Word32 i, j, nf, ip, order;
-	Word16 xlow, ylow, xhigh, yhigh, xmid, ymid, xint;
-	Word16 x, y, sign, exp;
-	Word16 *coef;
-	Word16 f1[NC + 1], f2[NC];
-	Word32 t0;
-	/*-------------------------------------------------------------*
-	 * find the sum and diff polynomials F1(z) and F2(z)           *
-	 *      F1(z) = [A(z) + z^M A(z^-1)]                           *
-	 *      F2(z) = [A(z) - z^M A(z^-1)]/(1-z^-2)                  *
-	 *                                                             *
-	 * for (i=0; i<NC; i++)                                        *
-	 * {                                                           *
-	 *   f1[i] = a[i] + a[M-i];                                    *
-	 *   f2[i] = a[i] - a[M-i];                                    *
-	 * }                                                           *
-	 * f1[NC] = 2.0*a[NC];                                         *
-	 *                                                             *
-	 * for (i=2; i<NC; i++)            Divide by (1-z^-2)          *
-	 *   f2[i] += f2[i-2];                                         *
-	 *-------------------------------------------------------------*/
-	for (i = 0; i < NC; i++)
-	{
-		t0 = a[i] << 15;
-		f1[i] = vo_round(t0 + (a[M - i] << 15));        /* =(a[i]+a[M-i])/2 */
-		f2[i] = vo_round(t0 - (a[M - i] << 15));        /* =(a[i]-a[M-i])/2 */
-	}
-	f1[NC] = a[NC];
-	for (i = 2; i < NC; i++)               /* Divide by (1-z^-2) */
-		f2[i] = add1(f2[i], f2[i - 2]);
+    Word32 i, j, nf, ip, order;
+    Word16 xlow, ylow, xhigh, yhigh, xmid, ymid, xint;
+    Word16 x, y, sign, exp;
+    Word16 *coef;
+    Word16 f1[NC + 1], f2[NC];
+    Word32 t0;
+    /*-------------------------------------------------------------*
+     * find the sum and diff polynomials F1(z) and F2(z)           *
+     *      F1(z) = [A(z) + z^M A(z^-1)]                           *
+     *      F2(z) = [A(z) - z^M A(z^-1)]/(1-z^-2)                  *
+     *                                                             *
+     * for (i=0; i<NC; i++)                                        *
+     * {                                                           *
+     *   f1[i] = a[i] + a[M-i];                                    *
+     *   f2[i] = a[i] - a[M-i];                                    *
+     * }                                                           *
+     * f1[NC] = 2.0*a[NC];                                         *
+     *                                                             *
+     * for (i=2; i<NC; i++)            Divide by (1-z^-2)          *
+     *   f2[i] += f2[i-2];                                         *
+     *-------------------------------------------------------------*/
+    for (i = 0; i < NC; i++)
+    {
+        t0 = a[i] << 15;
+        f1[i] = vo_round(t0 + (a[M - i] << 15));        /* =(a[i]+a[M-i])/2 */
+        f2[i] = vo_round(t0 - (a[M - i] << 15));        /* =(a[i]-a[M-i])/2 */
+    }
+    f1[NC] = a[NC];
+    for (i = 2; i < NC; i++)               /* Divide by (1-z^-2) */
+        f2[i] = add1(f2[i], f2[i - 2]);
 
-	/*---------------------------------------------------------------------*
-	 * Find the ISPs (roots of F1(z) and F2(z) ) using the                 *
-	 * Chebyshev polynomial evaluation.                                    *
-	 * The roots of F1(z) and F2(z) are alternatively searched.            *
-	 * We start by finding the first root of F1(z) then we switch          *
-	 * to F2(z) then back to F1(z) and so on until all roots are found.    *
-	 *                                                                     *
-	 *  - Evaluate Chebyshev pol. at grid points and check for sign change.*
-	 *  - If sign change track the root by subdividing the interval        *
-	 *    2 times and ckecking sign change.                                *
-	 *---------------------------------------------------------------------*/
-	nf = 0;                                  /* number of found frequencies */
-	ip = 0;                                  /* indicator for f1 or f2      */
-	coef = f1;
-	order = NC;
-	xlow = vogrid[0];
-	ylow = Chebps2(xlow, coef, order);
-	j = 0;
-	while ((nf < M - 1) && (j < GRID_POINTS))
-	{
-		j ++;
-		xhigh = xlow;
-		yhigh = ylow;
-		xlow = vogrid[j];
-		ylow = Chebps2(xlow, coef, order);
-		if ((ylow * yhigh) <= (Word32) 0)
-		{
-			/* divide 2 times the interval */
-			for (i = 0; i < 2; i++)
-			{
-				xmid = (xlow >> 1) + (xhigh >> 1);        /* xmid = (xlow + xhigh)/2 */
-				ymid = Chebps2(xmid, coef, order);
-				if ((ylow * ymid) <= (Word32) 0)
-				{
-					yhigh = ymid;
-					xhigh = xmid;
-				} else
-				{
-					ylow = ymid;
-					xlow = xmid;
-				}
-			}
-			/*-------------------------------------------------------------*
-			 * Linear interpolation                                        *
-			 *    xint = xlow - ylow*(xhigh-xlow)/(yhigh-ylow);            *
-			 *-------------------------------------------------------------*/
-			x = xhigh - xlow;
-			y = yhigh - ylow;
-			if (y == 0)
-			{
-				xint = xlow;
-			} else
-			{
-				sign = y;
-				y = abs_s(y);
-				exp = norm_s(y);
-				y = y << exp;
-				y = div_s((Word16) 16383, y);
-				t0 = x * y;
-				t0 = (t0 >> (19 - exp));
-				y = vo_extract_l(t0);         /* y= (xhigh-xlow)/(yhigh-ylow) in Q11 */
-				if (sign < 0)
-					y = -y;
-				t0 = ylow * y;      /* result in Q26 */
-				t0 = (t0 >> 10);        /* result in Q15 */
-				xint = vo_sub(xlow, vo_extract_l(t0));        /* xint = xlow - ylow*y */
-			}
-			isp[nf] = xint;
-			xlow = xint;
-			nf++;
-			if (ip == 0)
-			{
-				ip = 1;
-				coef = f2;
-				order = NC - 1;
-			} else
-			{
-				ip = 0;
-				coef = f1;
-				order = NC;
-			}
-			ylow = Chebps2(xlow, coef, order);
-		}
-	}
-	/* Check if M-1 roots found */
-	if(nf < M - 1)
-	{
-		for (i = 0; i < M; i++)
-		{
-			isp[i] = old_isp[i];
-		}
-	} else
-	{
-		isp[M - 1] = a[M] << 3;                      /* From Q12 to Q15 with saturation */
-	}
-	return;
+    /*---------------------------------------------------------------------*
+     * Find the ISPs (roots of F1(z) and F2(z) ) using the                 *
+     * Chebyshev polynomial evaluation.                                    *
+     * The roots of F1(z) and F2(z) are alternately searched.              *
+     * We start by finding the first root of F1(z) then we switch          *
+     * to F2(z) then back to F1(z) and so on until all roots are found.    *
+     *                                                                     *
+     *  - Evaluate Chebyshev pol. at grid points and check for sign change.*
+     *  - If sign change track the root by subdividing the interval        *
+     *    2 times and checking sign change.                                *
+     *---------------------------------------------------------------------*/
+    nf = 0;                                  /* number of found frequencies */
+    ip = 0;                                  /* indicator for f1 or f2      */
+    coef = f1;
+    order = NC;
+    xlow = vogrid[0];
+    ylow = Chebps2(xlow, coef, order);
+    j = 0;
+    while ((nf < M - 1) && (j < GRID_POINTS))
+    {
+        j ++;
+        xhigh = xlow;
+        yhigh = ylow;
+        xlow = vogrid[j];
+        ylow = Chebps2(xlow, coef, order);
+        if ((ylow * yhigh) <= (Word32) 0)
+        {
+            /* divide 2 times the interval */
+            for (i = 0; i < 2; i++)
+            {
+                xmid = (xlow >> 1) + (xhigh >> 1);        /* xmid = (xlow + xhigh)/2 */
+                ymid = Chebps2(xmid, coef, order);
+                if ((ylow * ymid) <= (Word32) 0)
+                {
+                    yhigh = ymid;
+                    xhigh = xmid;
+                } else
+                {
+                    ylow = ymid;
+                    xlow = xmid;
+                }
+            }
+            /*-------------------------------------------------------------*
+             * Linear interpolation                                        *
+             *    xint = xlow - ylow*(xhigh-xlow)/(yhigh-ylow);            *
+             *-------------------------------------------------------------*/
+            x = xhigh - xlow;
+            y = yhigh - ylow;
+            if (y == 0)
+            {
+                xint = xlow;
+            } else
+            {
+                sign = y;
+                y = abs_s(y);
+                exp = norm_s(y);
+                y = y << exp;
+                y = div_s((Word16) 16383, y);
+                t0 = x * y;
+                t0 = (t0 >> (19 - exp));
+                y = vo_extract_l(t0);         /* y= (xhigh-xlow)/(yhigh-ylow) in Q11 */
+                if (sign < 0)
+                    y = -y;
+                t0 = ylow * y;      /* result in Q26 */
+                t0 = (t0 >> 10);        /* result in Q15 */
+                xint = vo_sub(xlow, vo_extract_l(t0));        /* xint = xlow - ylow*y */
+            }
+            isp[nf] = xint;
+            xlow = xint;
+            nf++;
+            if (ip == 0)
+            {
+                ip = 1;
+                coef = f2;
+                order = NC - 1;
+            } else
+            {
+                ip = 0;
+                coef = f1;
+                order = NC;
+            }
+            ylow = Chebps2(xlow, coef, order);
+        }
+    }
+    /* Check if M-1 roots found */
+    if(nf < M - 1)
+    {
+        for (i = 0; i < M; i++)
+        {
+            isp[i] = old_isp[i];
+        }
+    } else
+    {
+        isp[M - 1] = a[M] << 3;                      /* From Q12 to Q15 with saturation */
+    }
+    return;
 }
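In conventional notation, the sum/difference split sketched in the comment
block at the top of Az_isp is, with M = 16 the LP order (this is just a
restatement of the f1[i] = a[i] + a[M-i], f2[i] = a[i] - a[M-i] loop above
followed by the division by (1 - z^-2)):

    F_1(z) = A(z) + z^{-M} A(z^{-1}), \qquad
    F_2(z) = \frac{A(z) - z^{-M} A(z^{-1})}{1 - z^{-2}}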
 
 /*--------------------------------------------------------------*
@@ -213,55 +213,55 @@
 
 static __inline Word16 Chebps2(Word16 x, Word16 f[], Word32 n)
 {
-	Word32 i, cheb;
-	Word16 b0_h, b0_l, b1_h, b1_l, b2_h, b2_l;
-	Word32 t0;
+    Word32 i, cheb;
+    Word16 b0_h, b0_l, b1_h, b1_l, b2_h, b2_l;
+    Word32 t0;
 
-	/* Note: All computation are done in Q24. */
+    /* Note: All computations are done in Q24. */
 
-	t0 = f[0] << 13;
-	b2_h = t0 >> 16;
-	b2_l = (t0 & 0xffff)>>1;
+    t0 = f[0] << 13;
+    b2_h = t0 >> 16;
+    b2_l = (t0 & 0xffff)>>1;
 
-	t0 = ((b2_h * x)<<1) + (((b2_l * x)>>15)<<1);
-	t0 <<= 1;
-	t0 += (f[1] << 13);						/* + f[1] in Q24        */
+    t0 = ((b2_h * x)<<1) + (((b2_l * x)>>15)<<1);
+    t0 <<= 1;
+    t0 += (f[1] << 13);                     /* + f[1] in Q24        */
 
-	b1_h = t0 >> 16;
-	b1_l = (t0 & 0xffff) >> 1;
+    b1_h = t0 >> 16;
+    b1_l = (t0 & 0xffff) >> 1;
 
-	for (i = 2; i < n; i++)
-	{
-		t0 = ((b1_h * x)<<1) + (((b1_l * x)>>15)<<1);
+    for (i = 2; i < n; i++)
+    {
+        t0 = ((b1_h * x)<<1) + (((b1_l * x)>>15)<<1);
 
-		t0 += (b2_h * (-16384))<<1;
-		t0 += (f[i] << 12);
-		t0 <<= 1;
-		t0 -= (b2_l << 1);					/* t0 = 2.0*x*b1 - b2 + f[i]; */
+        t0 += (b2_h * (-16384))<<1;
+        t0 += (f[i] << 12);
+        t0 <<= 1;
+        t0 -= (b2_l << 1);                  /* t0 = 2.0*x*b1 - b2 + f[i]; */
 
-		b0_h = t0 >> 16;
-		b0_l = (t0 & 0xffff) >> 1;
+        b0_h = t0 >> 16;
+        b0_l = (t0 & 0xffff) >> 1;
 
-		b2_l = b1_l;                         /* b2 = b1; */
-		b2_h = b1_h;
-		b1_l = b0_l;                         /* b1 = b0; */
-		b1_h = b0_h;
-	}
+        b2_l = b1_l;                         /* b2 = b1; */
+        b2_h = b1_h;
+        b1_l = b0_l;                         /* b1 = b0; */
+        b1_h = b0_h;
+    }
 
-	t0 = ((b1_h * x)<<1) + (((b1_l * x)>>15)<<1);
-	t0 += (b2_h * (-32768))<<1;				/* t0 = x*b1 - b2          */
-	t0 -= (b2_l << 1);
-	t0 += (f[n] << 12);						/* t0 = x*b1 - b2 + f[i]/2 */
+    t0 = ((b1_h * x)<<1) + (((b1_l * x)>>15)<<1);
+    t0 += (b2_h * (-32768))<<1;             /* t0 = x*b1 - b2          */
+    t0 -= (b2_l << 1);
+    t0 += (f[n] << 12);                     /* t0 = x*b1 - b2 + f[i]/2 */
 
-	t0 = L_shl2(t0, 6);                     /* Q24 to Q30 with saturation */
+    t0 = L_shl2(t0, 6);                     /* Q24 to Q30 with saturation */
 
-	cheb = extract_h(t0);                  /* Result in Q14              */
+    cheb = extract_h(t0);                  /* Result in Q14              */
 
-	if (cheb == -32768)
-	{
-		cheb = -32767;                     /* to avoid saturation in Az_isp */
-	}
-	return (cheb);
+    if (cheb == -32768)
+    {
+        cheb = -32767;                     /* to avoid saturation in Az_isp */
+    }
+    return (cheb);
 }
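Chebps2's recurrence is Clenshaw evaluation of a Chebyshev series, matching
its in-loop comment "t0 = 2.0*x*b1 - b2 + f[i]" and the final
"x*b1 - b2 + f[n]/2". A floating-point analogue of the same recurrence
(illustrative; chebps_ref is not codec code):

    #include <stddef.h>

    /* Clenshaw evaluation of a Chebyshev series with coefficients
     * f[0..n], mirroring Chebps2's comments. Assumes n >= 2. */
    static float chebps_ref(float x, const float f[], size_t n)
    {
        float b0 = 0.0f, b1, b2;
        size_t i;
        b2 = f[0];
        b1 = 2.0f * x * b2 + f[1];            /* first recurrence step */
        for (i = 2; i < n; i++) {
            b0 = 2.0f * x * b1 - b2 + f[i];   /* b0 = 2x*b1 - b2 + f[i] */
            b2 = b1;
            b1 = b0;
        }
        return x * b1 - b2 + 0.5f * f[n];     /* final step uses f[n]/2 */
    }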
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/bits.c b/media/libstagefright/codecs/amrwbenc/src/bits.c
index e78dc1f..6b8bddd 100644
--- a/media/libstagefright/codecs/amrwbenc/src/bits.c
+++ b/media/libstagefright/codecs/amrwbenc/src/bits.c
@@ -17,7 +17,7 @@
 /***********************************************************************
        File: bits.c
 
-	   Description: Performs bit stream manipulation
+       Description: Performs bit stream manipulation
 
 ************************************************************************/
 
@@ -33,151 +33,151 @@
 
 
 int PackBits(Word16 prms[],             /*  i: analysis parameters */
-			 Word16 coding_mode,        /*  i: coding bit-stream ratio mode */
-			 Word16 mode,               /*  i: coding bit-stream ratio mode*/
-			 Coder_State *st            /*i/o: coder global parameters struct */
-			 )
+             Word16 coding_mode,        /*  i: coding bit-rate mode */
+             Word16 mode,               /*  i: mode indication embedded in the frame */
+             Coder_State *st            /*i/o: coder global parameters struct */
+             )
 {
-	Word16 i, frame_type;
-	UWord8 temp;
-	UWord8 *stream_ptr;
-	Word16 bitstreamformat = st->frameType;
+    Word16 i, frame_type;
+    UWord8 temp;
+    UWord8 *stream_ptr;
+    Word16 bitstreamformat = st->frameType;
 
-	unsigned short* dataOut = st->outputStream;
+    unsigned short* dataOut = st->outputStream;
 
-	if (coding_mode == MRDTX)
-	{
-		st->sid_update_counter--;
+    if (coding_mode == MRDTX)
+    {
+        st->sid_update_counter--;
 
-		if (st->prev_ft == TX_SPEECH)
-		{
-			frame_type = TX_SID_FIRST;
-			st->sid_update_counter = 3;
-		} else
-		{
-			if ((st->sid_handover_debt > 0) && (st->sid_update_counter > 2))
-			{
-				/* ensure extra updates are  properly delayed after a possible SID_FIRST */
-				frame_type = TX_SID_UPDATE;
-				st->sid_handover_debt--;
-			} else
-			{
-				if (st->sid_update_counter == 0)
-				{
-					frame_type = TX_SID_UPDATE;
-					st->sid_update_counter = 8;
-				} else
-				{
-					frame_type = TX_NO_DATA;
-				}
-			}
-		}
-	} else
-	{
-		st->sid_update_counter = 8;
-		frame_type = TX_SPEECH;
-	}
-	st->prev_ft = frame_type;
+        if (st->prev_ft == TX_SPEECH)
+        {
+            frame_type = TX_SID_FIRST;
+            st->sid_update_counter = 3;
+        } else
+        {
+            if ((st->sid_handover_debt > 0) && (st->sid_update_counter > 2))
+            {
+                /* ensure extra updates are properly delayed after a possible SID_FIRST */
+                frame_type = TX_SID_UPDATE;
+                st->sid_handover_debt--;
+            } else
+            {
+                if (st->sid_update_counter == 0)
+                {
+                    frame_type = TX_SID_UPDATE;
+                    st->sid_update_counter = 8;
+                } else
+                {
+                    frame_type = TX_NO_DATA;
+                }
+            }
+        }
+    } else
+    {
+        st->sid_update_counter = 8;
+        frame_type = TX_SPEECH;
+    }
+    st->prev_ft = frame_type;
 
-	if(bitstreamformat == 0)				/* default file format */
-	{
-		*(dataOut) = TX_FRAME_TYPE;
-		*(dataOut + 1) = frame_type;
-		*(dataOut + 2) = mode;
-		for (i = 0; i < nb_of_bits[coding_mode]; i++)
-		{
-			*(dataOut + 3 + i) = prms[i];
-		}
-		return  (3 + nb_of_bits[coding_mode])<<1;
-	} else
-	{
-		if (bitstreamformat == 1)		/* ITU file format */
-		{
-			*(dataOut) = 0x6b21;
-			if(frame_type != TX_NO_DATA && frame_type != TX_SID_FIRST)
-			{
-				*(dataOut + 1) = nb_of_bits[coding_mode];
-				for (i = 0; i < nb_of_bits[coding_mode]; i++)
-				{
-					if(prms[i] == BIT_0){
-						*(dataOut + 2 + i) = BIT_0_ITU;
-					}
-					else{
-						*(dataOut + 2 + i) = BIT_1_ITU;
-					}
-				}
-				return (2 + nb_of_bits[coding_mode])<<1;
-			} else
-			{
-				*(dataOut + 1) = 0;
-				return 2<<1;
-			}
-		} else							/* MIME/storage file format */
-		{
+    if(bitstreamformat == 0)                /* default file format */
+    {
+        *(dataOut) = TX_FRAME_TYPE;
+        *(dataOut + 1) = frame_type;
+        *(dataOut + 2) = mode;
+        for (i = 0; i < nb_of_bits[coding_mode]; i++)
+        {
+            *(dataOut + 3 + i) = prms[i];
+        }
+        return  (3 + nb_of_bits[coding_mode])<<1;
+    } else
+    {
+        if (bitstreamformat == 1)       /* ITU file format */
+        {
+            *(dataOut) = 0x6b21;
+            if(frame_type != TX_NO_DATA && frame_type != TX_SID_FIRST)
+            {
+                *(dataOut + 1) = nb_of_bits[coding_mode];
+                for (i = 0; i < nb_of_bits[coding_mode]; i++)
+                {
+                    if(prms[i] == BIT_0){
+                        *(dataOut + 2 + i) = BIT_0_ITU;
+                    }
+                    else{
+                        *(dataOut + 2 + i) = BIT_1_ITU;
+                    }
+                }
+                return (2 + nb_of_bits[coding_mode])<<1;
+            } else
+            {
+                *(dataOut + 1) = 0;
+                return 2<<1;
+            }
+        } else                          /* MIME/storage file format */
+        {
 #define MRSID 9
-			/* change mode index in case of SID frame */
-			if (coding_mode == MRDTX)
-			{
-				coding_mode = MRSID;
-				if (frame_type == TX_SID_FIRST)
-				{
-					for (i = 0; i < NBBITS_SID; i++)	prms[i] = BIT_0;
-				}
-			}
-			/* -> force NO_DATA frame */
-			if (coding_mode < 0 || coding_mode > 15 || (coding_mode > MRSID && coding_mode < 14))
-			{
-				coding_mode = 15;
-			}
-			/* mark empty frames between SID updates as NO_DATA frames */
-			if (coding_mode == MRSID && frame_type == TX_NO_DATA)
-			{
-				coding_mode = 15;
-			}
-			/* set pointer for packed frame, note that we handle data as bytes */
-			stream_ptr = (UWord8*)dataOut;
-			/* insert table of contents (ToC) byte at the beginning of the packet */
-			*stream_ptr = toc_byte[coding_mode];
-			stream_ptr++;
-			temp = 0;
-			/* sort and pack AMR-WB speech or SID bits */
-			for (i = 1; i < unpacked_size[coding_mode] + 1; i++)
-			{
-				if (prms[sort_ptr[coding_mode][i-1]] == BIT_1)
-				{
-					temp++;
-				}
-				if (i&0x7)
-				{
-					temp <<= 1;
-				}
-				else
-				{
-					*stream_ptr = temp;
-					stream_ptr++;
-					temp = 0;
-				}
-			}
-			/* insert SID type indication and speech mode in case of SID frame */
-			if (coding_mode == MRSID)
-			{
-				if (frame_type == TX_SID_UPDATE)
-				{
-					temp++;
-				}
-				temp <<= 4;
-				temp += mode & 0x000F;
-			}
-			/* insert unused bits (zeros) at the tail of the last byte */
-			if (unused_size[coding_mode])
-			{
-				temp <<= (unused_size[coding_mode] - 1);
-			}
-			*stream_ptr = temp;
-			/* write packed frame into file (1 byte added to cover ToC entry) */
-			return (1 + packed_size[coding_mode]);
-		}
-	}
+            /* change mode index in case of SID frame */
+            if (coding_mode == MRDTX)
+            {
+                coding_mode = MRSID;
+                if (frame_type == TX_SID_FIRST)
+                {
+                    for (i = 0; i < NBBITS_SID; i++)    prms[i] = BIT_0;
+                }
+            }
+            /* -> force NO_DATA frame */
+            if (coding_mode < 0 || coding_mode > 15 || (coding_mode > MRSID && coding_mode < 14))
+            {
+                coding_mode = 15;
+            }
+            /* mark empty frames between SID updates as NO_DATA frames */
+            if (coding_mode == MRSID && frame_type == TX_NO_DATA)
+            {
+                coding_mode = 15;
+            }
+            /* set pointer for packed frame, note that we handle data as bytes */
+            stream_ptr = (UWord8*)dataOut;
+            /* insert table of contents (ToC) byte at the beginning of the packet */
+            *stream_ptr = toc_byte[coding_mode];
+            stream_ptr++;
+            temp = 0;
+            /* sort and pack AMR-WB speech or SID bits */
+            for (i = 1; i < unpacked_size[coding_mode] + 1; i++)
+            {
+                if (prms[sort_ptr[coding_mode][i-1]] == BIT_1)
+                {
+                    temp++;
+                }
+                if (i&0x7)
+                {
+                    temp <<= 1;
+                }
+                else
+                {
+                    *stream_ptr = temp;
+                    stream_ptr++;
+                    temp = 0;
+                }
+            }
+            /* insert SID type indication and speech mode in case of SID frame */
+            if (coding_mode == MRSID)
+            {
+                if (frame_type == TX_SID_UPDATE)
+                {
+                    temp++;
+                }
+                temp <<= 4;
+                temp += mode & 0x000F;
+            }
+            /* insert unused bits (zeros) at the tail of the last byte */
+            if (unused_size[coding_mode])
+            {
+                temp <<= (unused_size[coding_mode] - 1);
+            }
+            *stream_ptr = temp;
+            /* write packed frame into file (1 byte added to cover ToC entry) */
+            return (1 + packed_size[coding_mode]);
+        }
+    }
 }
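The MIME/storage branch above packs one bit per Word16 into bytes, MSB first,
eight at a time, padding the final byte with zeros. A self-contained sketch of
just that packing step (pack_bits is illustrative; the real loop additionally
reorders bits through sort_ptr[] and is preceded by the ToC byte):

    #include <stdint.h>
    #include <stddef.h>

    /* Pack one bit per input word into bytes, MSB first; pad the tail
     * byte with zeros. Sketch of the loop above, without bit reordering. */
    static size_t pack_bits(const int16_t *bits, size_t nbits, uint8_t *out)
    {
        uint8_t acc = 0;
        size_t i, nbytes = 0;
        for (i = 0; i < nbits; i++) {
            acc = (uint8_t)((acc << 1) | (bits[i] ? 1u : 0u));
            if ((i & 7u) == 7u) {          /* a full byte accumulated */
                out[nbytes++] = acc;
                acc = 0;
            }
        }
        if (nbits & 7u)                    /* zero-pad the last byte */
            out[nbytes++] = (uint8_t)(acc << (8u - (nbits & 7u)));
        return nbytes;
    }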
 
 /*-----------------------------------------------------*
@@ -185,24 +185,24 @@
 *-----------------------------------------------------*/
 
 void Parm_serial(
-		Word16 value,                         /* input : parameter value */
-		Word16 no_of_bits,                    /* input : number of bits  */
-		Word16 ** prms
-		)
+        Word16 value,                         /* input : parameter value */
+        Word16 no_of_bits,                    /* input : number of bits  */
+        Word16 ** prms
+        )
 {
-	Word16 i, bit;
-	*prms += no_of_bits;
-	for (i = 0; i < no_of_bits; i++)
-	{
-		bit = (Word16) (value & 0x0001);    /* get lsb */
-		if (bit == 0)
-			*--(*prms) = BIT_0;
-		else
-			*--(*prms) = BIT_1;
-		value >>= 1;
-	}
-	*prms += no_of_bits;
-	return;
+    Word16 i, bit;
+    *prms += no_of_bits;
+    for (i = 0; i < no_of_bits; i++)
+    {
+        bit = (Word16) (value & 0x0001);    /* get lsb */
+        if (bit == 0)
+            *--(*prms) = BIT_0;
+        else
+            *--(*prms) = BIT_1;
+        value >>= 1;
+    }
+    *prms += no_of_bits;
+    return;
 }
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c b/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c
index 18698e2..dbb94c6 100644
--- a/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c
@@ -17,7 +17,7 @@
 /************************************************************************
 *      File: c2t64fx.c                                                  *
 *                                                                       *
-*	   Description:Performs algebraic codebook search for 6.60kbits mode*
+*   Description: Performs algebraic codebook search for 6.60 kbit/s mode*
 *                                                                       *
 *************************************************************************/
 
@@ -45,252 +45,255 @@
 **************************************************************************/
 
 void ACELP_2t64_fx(
-		Word16 dn[],                          /* (i) <12b : correlation between target x[] and H[]      */
-		Word16 cn[],                          /* (i) <12b : residual after long term prediction         */
-		Word16 H[],                           /* (i) Q12: impulse response of weighted synthesis filter */
-		Word16 code[],                        /* (o) Q9 : algebraic (fixed) codebook excitation         */
-		Word16 y[],                           /* (o) Q9 : filtered fixed codebook excitation            */
-		Word16 * index                        /* (o) : index (12): 5+1+5+1 = 11 bits.                   */
-		)
+        Word16 dn[],                          /* (i) <12b : correlation between target x[] and H[]      */
+        Word16 cn[],                          /* (i) <12b : residual after long term prediction         */
+        Word16 H[],                           /* (i) Q12: impulse response of weighted synthesis filter */
+        Word16 code[],                        /* (o) Q9 : algebraic (fixed) codebook excitation         */
+        Word16 y[],                           /* (o) Q9 : filtered fixed codebook excitation            */
+        Word16 * index                        /* (o) : index (12): 5+1+5+1 = 12 bits.                   */
+        )
 {
-	Word32 i, j, k, i0, i1, ix, iy, pos, pos2;
-	Word16 ps, psk, ps1, ps2, alpk, alp1, alp2, sq;
-	Word16 alp, val, exp, k_cn, k_dn;
-	Word16 *p0, *p1, *p2, *psign;
-	Word16 *h, *h_inv, *ptr_h1, *ptr_h2, *ptr_hf;
+    Word32 i, j, k, i0, i1, ix, iy, pos, pos2;
+    Word16 ps, psk, ps1, ps2, alpk, alp1, alp2, sq;
+    Word16 alp, val, exp, k_cn, k_dn;
+    Word16 *p0, *p1, *p2, *psign;
+    Word16 *h, *h_inv, *ptr_h1, *ptr_h2, *ptr_hf;
 
-	Word16 sign[L_SUBFR], vec[L_SUBFR], dn2[L_SUBFR];
-	Word16 h_buf[4 * L_SUBFR] = {0};
-	Word16 rrixix[NB_TRACK][NB_POS];
-	Word16 rrixiy[MSIZE];
-	Word32 s, cor;
+    Word16 sign[L_SUBFR], vec[L_SUBFR], dn2[L_SUBFR];
+    Word16 h_buf[4 * L_SUBFR] = {0};
+    Word16 rrixix[NB_TRACK][NB_POS];
+    Word16 rrixiy[MSIZE];
+    Word32 s, cor;
 
-	/*----------------------------------------------------------------*
-	 * Find sign for each pulse position.                             *
-	 *----------------------------------------------------------------*/
-	alp = 8192;                              /* alp = 2.0 (Q12) */
+    /*----------------------------------------------------------------*
+     * Find sign for each pulse position.                             *
+     *----------------------------------------------------------------*/
+    alp = 8192;                              /* alp = 2.0 (Q12) */
 
-	/* calculate energy for normalization of cn[] and dn[] */
-	/* set k_cn = 32..32767 (ener_cn = 2^30..256-0) */
+    /* calculate energy for normalization of cn[] and dn[] */
+    /* set k_cn = 32..32767 (ener_cn = 2^30..256-0) */
 #ifdef ASM_OPT             /* asm optimization branch */
-	s = Dot_product12_asm(cn, cn, L_SUBFR, &exp);
+    s = Dot_product12_asm(cn, cn, L_SUBFR, &exp);
 #else
-	s = Dot_product12(cn, cn, L_SUBFR, &exp);
+    s = Dot_product12(cn, cn, L_SUBFR, &exp);
 #endif
 
-	Isqrt_n(&s, &exp);
-	s = L_shl(s, add1(exp, 5));
-	k_cn = vo_round(s);
+    Isqrt_n(&s, &exp);
+    s = L_shl(s, add1(exp, 5));
+    if (s > INT_MAX - 0x8000) {
+        s = INT_MAX - 0x8000;
+    }
+    k_cn = vo_round(s);
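The clamp added above guards the rounding that follows: assuming vo_round
follows the ITU-T basic-op round(), which adds 0x8000 before taking the high
16 bits, any input above INT_MAX - 0x8000 would overflow the signed 32-bit
addition. A sketch of that failure mode under the stated assumption (round_q15
is illustrative, not the codec's vo_round; it also assumes arithmetic right
shift of signed values):

    #include <limits.h>
    #include <stdint.h>

    /* Illustrative Q31 -> Q15 rounding in the ITU-T basic-op style.
     * Without the clamp, s + 0x8000 overflows for s near INT32_MAX. */
    static int16_t round_q15(int32_t s)
    {
        if (s > INT32_MAX - 0x8000)        /* same guard the diff adds */
            s = INT32_MAX - 0x8000;
        return (int16_t)((s + 0x8000) >> 16);
    }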
 
-	/* set k_dn = 32..512 (ener_dn = 2^30..2^22) */
+    /* set k_dn = 32..512 (ener_dn = 2^30..2^22) */
 #ifdef ASM_OPT                  /* asm optimization branch */
-	s = Dot_product12_asm(dn, dn, L_SUBFR, &exp);
+    s = Dot_product12_asm(dn, dn, L_SUBFR, &exp);
 #else
-	s = Dot_product12(dn, dn, L_SUBFR, &exp);
+    s = Dot_product12(dn, dn, L_SUBFR, &exp);
 #endif
 
-	Isqrt_n(&s, &exp);
-	k_dn = vo_round(L_shl(s, (exp + 8)));    /* k_dn = 256..4096 */
-	k_dn = vo_mult_r(alp, k_dn);              /* alp in Q12 */
+    Isqrt_n(&s, &exp);
+    k_dn = voround(L_shl(s, (exp + 8)));    /* k_dn = 256..4096 */
+    k_dn = vo_mult_r(alp, k_dn);              /* alp in Q12 */
 
-	/* mix normalized cn[] and dn[] */
-	p0 = cn;
-	p1 = dn;
-	p2 = dn2;
+    /* mix normalized cn[] and dn[] */
+    p0 = cn;
+    p1 = dn;
+    p2 = dn2;
 
-	for (i = 0; i < L_SUBFR/4; i++)
-	{
-		s = (k_cn* (*p0++))+(k_dn * (*p1++));
-		*p2++ = s >> 7;
-		s = (k_cn* (*p0++))+(k_dn * (*p1++));
-		*p2++ = s >> 7;
-		s = (k_cn* (*p0++))+(k_dn * (*p1++));
-		*p2++ = s >> 7;
-		s = (k_cn* (*p0++))+(k_dn * (*p1++));
-		*p2++ = s >> 7;
-	}
+    for (i = 0; i < L_SUBFR/4; i++)
+    {
+        s = (k_cn* (*p0++))+(k_dn * (*p1++));
+        *p2++ = s >> 7;
+        s = (k_cn* (*p0++))+(k_dn * (*p1++));
+        *p2++ = s >> 7;
+        s = (k_cn* (*p0++))+(k_dn * (*p1++));
+        *p2++ = s >> 7;
+        s = (k_cn* (*p0++))+(k_dn * (*p1++));
+        *p2++ = s >> 7;
+    }
 
-	/* set sign according to dn2[] = k_cn*cn[] + k_dn*dn[]    */
-	for (i = 0; i < L_SUBFR; i ++)
-	{
-		val = dn[i];
-		ps = dn2[i];
-		if (ps >= 0)
-		{
-			sign[i] = 32767;             /* sign = +1 (Q12) */
-			vec[i] = -32768;
-		} else
-		{
-			sign[i] = -32768;            /* sign = -1 (Q12) */
-			vec[i] = 32767;
-			dn[i] = -val;
-		}
-	}
-	/*------------------------------------------------------------*
-	 * Compute h_inv[i].                                          *
-	 *------------------------------------------------------------*/
-	/* impulse response buffer for fast computation */
-	h = h_buf + L_SUBFR;
-	h_inv = h + (L_SUBFR<<1);
+    /* set sign according to dn2[] = k_cn*cn[] + k_dn*dn[]    */
+    for (i = 0; i < L_SUBFR; i ++)
+    {
+        val = dn[i];
+        ps = dn2[i];
+        if (ps >= 0)
+        {
+            sign[i] = 32767;             /* sign = +1 (Q12) */
+            vec[i] = -32768;
+        } else
+        {
+            sign[i] = -32768;            /* sign = -1 (Q12) */
+            vec[i] = 32767;
+            dn[i] = -val;
+        }
+    }
+    /*------------------------------------------------------------*
+     * Compute h_inv[i].                                          *
+     *------------------------------------------------------------*/
+    /* impulse response buffer for fast computation */
+    h = h_buf + L_SUBFR;
+    h_inv = h + (L_SUBFR<<1);
 
-	for (i = 0; i < L_SUBFR; i++)
-	{
-		h[i] = H[i];
-		h_inv[i] = vo_negate(h[i]);
-	}
+    for (i = 0; i < L_SUBFR; i++)
+    {
+        h[i] = H[i];
+        h_inv[i] = vo_negate(h[i]);
+    }
 
-	/*------------------------------------------------------------*
-	 * Compute rrixix[][] needed for the codebook search.         *
-	 * Result is multiplied by 0.5                                *
-	 *------------------------------------------------------------*/
-	/* Init pointers to last position of rrixix[] */
-	p0 = &rrixix[0][NB_POS - 1];
-	p1 = &rrixix[1][NB_POS - 1];
+    /*------------------------------------------------------------*
+     * Compute rrixix[][] needed for the codebook search.         *
+     * Result is multiplied by 0.5                                *
+     *------------------------------------------------------------*/
+    /* Init pointers to last position of rrixix[] */
+    p0 = &rrixix[0][NB_POS - 1];
+    p1 = &rrixix[1][NB_POS - 1];
 
-	ptr_h1 = h;
-	cor = 0x00010000L;                          /* for rounding */
-	for (i = 0; i < NB_POS; i++)
-	{
-		cor += ((*ptr_h1) * (*ptr_h1) << 1);
-		ptr_h1++;
-		*p1-- = (extract_h(cor) >> 1);
-		cor += ((*ptr_h1) * (*ptr_h1) << 1);
-		ptr_h1++;
-		*p0-- = (extract_h(cor) >> 1);
-	}
+    ptr_h1 = h;
+    cor = 0x00010000L;                          /* for rounding */
+    for (i = 0; i < NB_POS; i++)
+    {
+        cor += ((*ptr_h1) * (*ptr_h1) << 1);
+        ptr_h1++;
+        *p1-- = (extract_h(cor) >> 1);
+        cor += ((*ptr_h1) * (*ptr_h1) << 1);
+        ptr_h1++;
+        *p0-- = (extract_h(cor) >> 1);
+    }
 
-	/*------------------------------------------------------------*
-	 * Compute rrixiy[][] needed for the codebook search.         *
-	 *------------------------------------------------------------*/
-	pos = MSIZE - 1;
-	pos2 = MSIZE - 2;
-	ptr_hf = h + 1;
+    /*------------------------------------------------------------*
+     * Compute rrixiy[][] needed for the codebook search.         *
+     *------------------------------------------------------------*/
+    pos = MSIZE - 1;
+    pos2 = MSIZE - 2;
+    ptr_hf = h + 1;
 
-	for (k = 0; k < NB_POS; k++)
-	{
-		p1 = &rrixiy[pos];
-		p0 = &rrixiy[pos2];
-		cor = 0x00008000L;                        /* for rounding */
-		ptr_h1 = h;
-		ptr_h2 = ptr_hf;
+    for (k = 0; k < NB_POS; k++)
+    {
+        p1 = &rrixiy[pos];
+        p0 = &rrixiy[pos2];
+        cor = 0x00008000L;                        /* for rounding */
+        ptr_h1 = h;
+        ptr_h2 = ptr_hf;
 
-		for (i = (k + 1); i < NB_POS; i++)
-		{
-			cor += ((*ptr_h1) * (*ptr_h2))<<1;
-			ptr_h1++;
-			ptr_h2++;
-			*p1 = extract_h(cor);
-			cor += ((*ptr_h1) * (*ptr_h2))<<1;
-			ptr_h1++;
-			ptr_h2++;
-			*p0 = extract_h(cor);
+        for (i = (k + 1); i < NB_POS; i++)
+        {
+            cor += ((*ptr_h1) * (*ptr_h2))<<1;
+            ptr_h1++;
+            ptr_h2++;
+            *p1 = extract_h(cor);
+            cor += ((*ptr_h1) * (*ptr_h2))<<1;
+            ptr_h1++;
+            ptr_h2++;
+            *p0 = extract_h(cor);
 
-			p1 -= (NB_POS + 1);
-			p0 -= (NB_POS + 1);
-		}
-		cor += ((*ptr_h1) * (*ptr_h2))<<1;
-		ptr_h1++;
-		ptr_h2++;
-		*p1 = extract_h(cor);
+            p1 -= (NB_POS + 1);
+            p0 -= (NB_POS + 1);
+        }
+        cor += ((*ptr_h1) * (*ptr_h2))<<1;
+        ptr_h1++;
+        ptr_h2++;
+        *p1 = extract_h(cor);
 
-		pos -= NB_POS;
-		pos2--;
-		ptr_hf += STEP;
-	}
+        pos -= NB_POS;
+        pos2--;
+        ptr_hf += STEP;
+    }
 
-	/*------------------------------------------------------------*
-	 * Modification of rrixiy[][] to take signs into account.     *
-	 *------------------------------------------------------------*/
-	p0 = rrixiy;
-	for (i = 0; i < L_SUBFR; i += STEP)
-	{
-		psign = sign;
-		if (psign[i] < 0)
-		{
-			psign = vec;
-		}
-		for (j = 1; j < L_SUBFR; j += STEP)
-		{
-			*p0 = vo_mult(*p0, psign[j]);
-			p0++;
-		}
-	}
-	/*-------------------------------------------------------------------*
-	 * search 2 pulses:                                                  *
-	 * ~@~~~~~~~~~~~~~~                                                  *
-	 * 32 pos x 32 pos = 1024 tests (all combinaisons is tested)         *
-	 *-------------------------------------------------------------------*/
-	p0 = rrixix[0];
-	p1 = rrixix[1];
-	p2 = rrixiy;
+    /*------------------------------------------------------------*
+     * Modification of rrixiy[][] to take signs into account.     *
+     *------------------------------------------------------------*/
+    p0 = rrixiy;
+    for (i = 0; i < L_SUBFR; i += STEP)
+    {
+        psign = sign;
+        if (psign[i] < 0)
+        {
+            psign = vec;
+        }
+        for (j = 1; j < L_SUBFR; j += STEP)
+        {
+            *p0 = vo_mult(*p0, psign[j]);
+            p0++;
+        }
+    }
+    /*-------------------------------------------------------------------*
+     * search 2 pulses:                                                  *
+     * ~~~~~~~~~~~~~~~~                                                  *
+     * 32 pos x 32 pos = 1024 tests (all combinations are tested)        *
+     *-------------------------------------------------------------------*/
+    p0 = rrixix[0];
+    p1 = rrixix[1];
+    p2 = rrixiy;
 
-	psk = -1;
-	alpk = 1;
-	ix = 0;
-	iy = 1;
+    psk = -1;
+    alpk = 1;
+    ix = 0;
+    iy = 1;
 
-	for (i0 = 0; i0 < L_SUBFR; i0 += STEP)
-	{
-		ps1 = dn[i0];
-		alp1 = (*p0++);
-		pos = -1;
-		for (i1 = 1; i1 < L_SUBFR; i1 += STEP)
-		{
-			ps2 = add1(ps1, dn[i1]);
-			alp2 = add1(alp1, add1(*p1++, *p2++));
-			sq = vo_mult(ps2, ps2);
-			s = vo_L_mult(alpk, sq) - ((psk * alp2)<<1);
-			if (s > 0)
-			{
-				psk = sq;
-				alpk = alp2;
-				pos = i1;
-			}
-		}
-		p1 -= NB_POS;
-		if (pos >= 0)
-		{
-			ix = i0;
-			iy = pos;
-		}
-	}
-	/*-------------------------------------------------------------------*
-	 * Build the codeword, the filtered codeword and index of codevector.*
-	 *-------------------------------------------------------------------*/
+    for (i0 = 0; i0 < L_SUBFR; i0 += STEP)
+    {
+        ps1 = dn[i0];
+        alp1 = (*p0++);
+        pos = -1;
+        for (i1 = 1; i1 < L_SUBFR; i1 += STEP)
+        {
+            ps2 = add1(ps1, dn[i1]);
+            alp2 = add1(alp1, add1(*p1++, *p2++));
+            sq = vo_mult(ps2, ps2);
+            s = vo_L_mult(alpk, sq) - ((psk * alp2)<<1);
+            if (s > 0)
+            {
+                psk = sq;
+                alpk = alp2;
+                pos = i1;
+            }
+        }
+        p1 -= NB_POS;
+        if (pos >= 0)
+        {
+            ix = i0;
+            iy = pos;
+        }
+    }
+    /*-------------------------------------------------------------------*
+     * Build the codeword, the filtered codeword and index of codevector.*
+     *-------------------------------------------------------------------*/
 
-	for (i = 0; i < L_SUBFR; i++)
-	{
-		code[i] = 0;
-	}
+    for (i = 0; i < L_SUBFR; i++)
+    {
+        code[i] = 0;
+    }
 
-	i0 = (ix >> 1);                       /* pos of pulse 1 (0..31) */
-	i1 = (iy >> 1);                       /* pos of pulse 2 (0..31) */
-	if (sign[ix] > 0)
-	{
-		code[ix] = 512;                     /* codeword in Q9 format */
-		p0 = h - ix;
-	} else
-	{
-		code[ix] = -512;
-		i0 += NB_POS;
-		p0 = h_inv - ix;
-	}
-	if (sign[iy] > 0)
-	{
-		code[iy] = 512;
-		p1 = h - iy;
-	} else
-	{
-		code[iy] = -512;
-		i1 += NB_POS;
-		p1 = h_inv - iy;
-	}
-	*index = add1((i0 << 6), i1);
-	for (i = 0; i < L_SUBFR; i++)
-	{
-		y[i] = vo_shr_r(add1((*p0++), (*p1++)), 3);
-	}
-	return;
+    i0 = (ix >> 1);                       /* pos of pulse 1 (0..31) */
+    i1 = (iy >> 1);                       /* pos of pulse 2 (0..31) */
+    if (sign[ix] > 0)
+    {
+        code[ix] = 512;                     /* codeword in Q9 format */
+        p0 = h - ix;
+    } else
+    {
+        code[ix] = -512;
+        i0 += NB_POS;
+        p0 = h_inv - ix;
+    }
+    if (sign[iy] > 0)
+    {
+        code[iy] = 512;
+        p1 = h - iy;
+    } else
+    {
+        code[iy] = -512;
+        i1 += NB_POS;
+        p1 = h_inv - iy;
+    }
+    *index = add1((i0 << 6), i1);
+    for (i = 0; i < L_SUBFR; i++)
+    {
+        y[i] = vo_shr_r(add1((*p0++), (*p1++)), 3);
+    }
+    return;
 }
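
The pair search above never divides: to maximize ps2^2/alp2 it squares the correlation into sq and tests the cross-multiplied difference s = alpk*sq - psk*alp2 against the running best (psk, alpk), which preserves the comparison because both energy terms are positive. A floating-point sketch of the same decision rule (illustrative only, not the fixed-point code):

    /* Nonzero when candidate (sq, alp2) beats the stored best (psk, alpk);
     * psk and sq already hold squared correlations.  Assumes both energies
     * are positive, which the rounding bias in the tables above guarantees. */
    static int candidate_is_better(double sq, double alp2,
                                   double psk, double alpk)
    {
        return sq * alpk > psk * alp2;      /* sq/alp2 > psk/alpk */
    }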
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
index 1ecc11f..8cebb09 100644
--- a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
@@ -17,7 +17,7 @@
 /***********************************************************************
 *      File: c4t64fx.c                                                 *
 *                                                                      *
-*	   Description:Performs algebraic codebook search for higher modes *
+*     Description: Performs algebraic codebook search for higher modes *
 *                                                                      *
 ************************************************************************/
 
@@ -48,15 +48,15 @@
 #include "q_pulse.h"
 
 static Word16 tipos[36] = {
-	0, 1, 2, 3,                            /* starting point &ipos[0], 1st iter */
-	1, 2, 3, 0,                            /* starting point &ipos[4], 2nd iter */
-	2, 3, 0, 1,                            /* starting point &ipos[8], 3rd iter */
-	3, 0, 1, 2,                            /* starting point &ipos[12], 4th iter */
-	0, 1, 2, 3,
-	1, 2, 3, 0,
-	2, 3, 0, 1,
-	3, 0, 1, 2,
-	0, 1, 2, 3};                           /* end point for 24 pulses &ipos[35], 4th iter */
+    0, 1, 2, 3,                            /* starting point &ipos[0], 1st iter */
+    1, 2, 3, 0,                            /* starting point &ipos[4], 2nd iter */
+    2, 3, 0, 1,                            /* starting point &ipos[8], 3rd iter */
+    3, 0, 1, 2,                            /* starting point &ipos[12], 4th iter */
+    0, 1, 2, 3,
+    1, 2, 3, 0,
+    2, 3, 0, 1,
+    3, 0, 1, 2,
+    0, 1, 2, 3};                           /* end point for 24 pulses &ipos[35], 4th iter */
 
 #define NB_PULSE_MAX  24
 
@@ -70,751 +70,759 @@
 
 /* Private functions */
 void cor_h_vec_012(
-		Word16 h[],                           /* (i) scaled impulse response                 */
-		Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
-		Word16 track,                         /* (i) track to use                            */
-		Word16 sign[],                        /* (i) sign vector                             */
-		Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
-		Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
-		Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
-		);
+        Word16 h[],                           /* (i) scaled impulse response                 */
+        Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
+        Word16 track,                         /* (i) track to use                            */
+        Word16 sign[],                        /* (i) sign vector                             */
+        Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
+        Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
+        Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
+        );
 
 void cor_h_vec_012_asm(
-		Word16 h[],                           /* (i) scaled impulse response                 */
-		Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
-		Word16 track,                         /* (i) track to use                            */
-		Word16 sign[],                        /* (i) sign vector                             */
-		Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
-		Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
-		Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
-		);
+        Word16 h[],                           /* (i) scaled impulse response                 */
+        Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
+        Word16 track,                         /* (i) track to use                            */
+        Word16 sign[],                        /* (i) sign vector                             */
+        Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
+        Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
+        Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
+        );
 
 void cor_h_vec_30(
-		Word16 h[],                           /* (i) scaled impulse response                 */
-		Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
-		Word16 track,                         /* (i) track to use                            */
-		Word16 sign[],                        /* (i) sign vector                             */
-		Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
-		Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
-		Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
-		);
+        Word16 h[],                           /* (i) scaled impulse response                 */
+        Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
+        Word16 track,                         /* (i) track to use                            */
+        Word16 sign[],                        /* (i) sign vector                             */
+        Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
+        Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
+        Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
+        );
 
 void search_ixiy(
-		Word16 nb_pos_ix,                     /* (i) nb of pos for pulse 1 (1..8)       */
-		Word16 track_x,                       /* (i) track of pulse 1                   */
-		Word16 track_y,                       /* (i) track of pulse 2                   */
-		Word16 * ps,                          /* (i/o) correlation of all fixed pulses  */
-		Word16 * alp,                         /* (i/o) energy of all fixed pulses       */
-		Word16 * ix,                          /* (o) position of pulse 1                */
-		Word16 * iy,                          /* (o) position of pulse 2                */
-		Word16 dn[],                          /* (i) corr. between target and h[]       */
-		Word16 dn2[],                         /* (i) vector of selected positions       */
-		Word16 cor_x[],                       /* (i) corr. of pulse 1 with fixed pulses */
-		Word16 cor_y[],                       /* (i) corr. of pulse 2 with fixed pulses */
-		Word16 rrixiy[][MSIZE]                /* (i) corr. of pulse 1 with pulse 2   */
-		);
+        Word16 nb_pos_ix,                     /* (i) nb of pos for pulse 1 (1..8)       */
+        Word16 track_x,                       /* (i) track of pulse 1                   */
+        Word16 track_y,                       /* (i) track of pulse 2                   */
+        Word16 * ps,                          /* (i/o) correlation of all fixed pulses  */
+        Word16 * alp,                         /* (i/o) energy of all fixed pulses       */
+        Word16 * ix,                          /* (o) position of pulse 1                */
+        Word16 * iy,                          /* (o) position of pulse 2                */
+        Word16 dn[],                          /* (i) corr. between target and h[]       */
+        Word16 dn2[],                         /* (i) vector of selected positions       */
+        Word16 cor_x[],                       /* (i) corr. of pulse 1 with fixed pulses */
+        Word16 cor_y[],                       /* (i) corr. of pulse 2 with fixed pulses */
+        Word16 rrixiy[][MSIZE]                /* (i) corr. of pulse 1 with pulse 2   */
+        );
 
 
 void ACELP_4t64_fx(
-		Word16 dn[],                          /* (i) <12b : correlation between target x[] and H[]      */
-		Word16 cn[],                          /* (i) <12b : residual after long term prediction         */
-		Word16 H[],                           /* (i) Q12: impulse response of weighted synthesis filter */
-		Word16 code[],                        /* (o) Q9 : algebraic (fixed) codebook excitation         */
-		Word16 y[],                           /* (o) Q9 : filtered fixed codebook excitation            */
-		Word16 nbbits,                        /* (i) : 20, 36, 44, 52, 64, 72 or 88 bits                */
-		Word16 ser_size,                      /* (i) : bit rate                                         */
-		Word16 _index[]                       /* (o) : index (20): 5+5+5+5 = 20 bits.                   */
-		/* (o) : index (36): 9+9+9+9 = 36 bits.                   */
-		/* (o) : index (44): 13+9+13+9 = 44 bits.                 */
-		/* (o) : index (52): 13+13+13+13 = 52 bits.               */
-		/* (o) : index (64): 2+2+2+2+14+14+14+14 = 64 bits.       */
-		/* (o) : index (72): 10+2+10+2+10+14+10+14 = 72 bits.     */
-		/* (o) : index (88): 11+11+11+11+11+11+11+11 = 88 bits.   */
-		)
+        Word16 dn[],                          /* (i) <12b : correlation between target x[] and H[]      */
+        Word16 cn[],                          /* (i) <12b : residual after long term prediction         */
+        Word16 H[],                           /* (i) Q12: impulse response of weighted synthesis filter */
+        Word16 code[],                        /* (o) Q9 : algebraic (fixed) codebook excitation         */
+        Word16 y[],                           /* (o) Q9 : filtered fixed codebook excitation            */
+        Word16 nbbits,                        /* (i) : 20, 36, 44, 52, 64, 72 or 88 bits                */
+        Word16 ser_size,                      /* (i) : bit rate                                         */
+        Word16 _index[]                       /* (o) : index (20): 5+5+5+5 = 20 bits.                   */
+        /* (o) : index (36): 9+9+9+9 = 36 bits.                   */
+        /* (o) : index (44): 13+9+13+9 = 44 bits.                 */
+        /* (o) : index (52): 13+13+13+13 = 52 bits.               */
+        /* (o) : index (64): 2+2+2+2+14+14+14+14 = 64 bits.       */
+        /* (o) : index (72): 10+2+10+2+10+14+10+14 = 72 bits.     */
+        /* (o) : index (88): 11+11+11+11+11+11+11+11 = 88 bits.   */
+        )
 {
-	Word32 i, j, k;
-	Word16 st, ix, iy, pos, index, track, nb_pulse, nbiter, j_temp;
-	Word16 psk, ps, alpk, alp, val, k_cn, k_dn, exp;
-	Word16 *p0, *p1, *p2, *p3, *psign;
-	Word16 *h, *h_inv, *ptr_h1, *ptr_h2, *ptr_hf, h_shift;
-	Word32 s, cor, L_tmp, L_index;
-	Word16 dn2[L_SUBFR], sign[L_SUBFR], vec[L_SUBFR];
-	Word16 ind[NPMAXPT * NB_TRACK];
-	Word16 codvec[NB_PULSE_MAX], nbpos[10];
-	Word16 cor_x[NB_POS], cor_y[NB_POS], pos_max[NB_TRACK];
-	Word16 h_buf[4 * L_SUBFR];
-	Word16 rrixix[NB_TRACK][NB_POS], rrixiy[NB_TRACK][MSIZE];
-	Word16 ipos[NB_PULSE_MAX];
+    Word32 i, j, k;
+    Word16 st, ix, iy, pos, index, track, nb_pulse, nbiter, j_temp;
+    Word16 psk, ps, alpk, alp, val, k_cn, k_dn, exp;
+    Word16 *p0, *p1, *p2, *p3, *psign;
+    Word16 *h, *h_inv, *ptr_h1, *ptr_h2, *ptr_hf, h_shift;
+    Word32 s, cor, L_tmp, L_index;
+    Word16 dn2[L_SUBFR], sign[L_SUBFR], vec[L_SUBFR];
+    Word16 ind[NPMAXPT * NB_TRACK];
+    Word16 codvec[NB_PULSE_MAX], nbpos[10];
+    Word16 cor_x[NB_POS], cor_y[NB_POS], pos_max[NB_TRACK];
+    Word16 h_buf[4 * L_SUBFR];
+    Word16 rrixix[NB_TRACK][NB_POS], rrixiy[NB_TRACK][MSIZE];
+    Word16 ipos[NB_PULSE_MAX];
 
-	switch (nbbits)
-	{
-		case 20:                               /* 20 bits, 4 pulses, 4 tracks */
-			nbiter = 4;                          /* 4x16x16=1024 loop */
-			alp = 8192;                          /* alp = 2.0 (Q12) */
-			nb_pulse = 4;
-			nbpos[0] = 4;
-			nbpos[1] = 8;
-			break;
-		case 36:                               /* 36 bits, 8 pulses, 4 tracks */
-			nbiter = 4;                          /* 4x20x16=1280 loop */
-			alp = 4096;                          /* alp = 1.0 (Q12) */
-			nb_pulse = 8;
-			nbpos[0] = 4;
-			nbpos[1] = 8;
-			nbpos[2] = 8;
-			break;
-		case 44:                               /* 44 bits, 10 pulses, 4 tracks */
-			nbiter = 4;                          /* 4x26x16=1664 loop */
-			alp = 4096;                          /* alp = 1.0 (Q12) */
-			nb_pulse = 10;
-			nbpos[0] = 4;
-			nbpos[1] = 6;
-			nbpos[2] = 8;
-			nbpos[3] = 8;
-			break;
-		case 52:                               /* 52 bits, 12 pulses, 4 tracks */
-			nbiter = 4;                          /* 4x26x16=1664 loop */
-			alp = 4096;                          /* alp = 1.0 (Q12) */
-			nb_pulse = 12;
-			nbpos[0] = 4;
-			nbpos[1] = 6;
-			nbpos[2] = 8;
-			nbpos[3] = 8;
-			break;
-		case 64:                               /* 64 bits, 16 pulses, 4 tracks */
-			nbiter = 3;                          /* 3x36x16=1728 loop */
-			alp = 3277;                          /* alp = 0.8 (Q12) */
-			nb_pulse = 16;
-			nbpos[0] = 4;
-			nbpos[1] = 4;
-			nbpos[2] = 6;
-			nbpos[3] = 6;
-			nbpos[4] = 8;
-			nbpos[5] = 8;
-			break;
-		case 72:                               /* 72 bits, 18 pulses, 4 tracks */
-			nbiter = 3;                          /* 3x35x16=1680 loop */
-			alp = 3072;                          /* alp = 0.75 (Q12) */
-			nb_pulse = 18;
-			nbpos[0] = 2;
-			nbpos[1] = 3;
-			nbpos[2] = 4;
-			nbpos[3] = 5;
-			nbpos[4] = 6;
-			nbpos[5] = 7;
-			nbpos[6] = 8;
-			break;
-		case 88:                               /* 88 bits, 24 pulses, 4 tracks */
-			if(ser_size > 462)
-				nbiter = 1;
-			else
-				nbiter = 2;                    /* 2x53x16=1696 loop */
+    switch (nbbits)
+    {
+        case 20:                               /* 20 bits, 4 pulses, 4 tracks */
+            nbiter = 4;                          /* 4x16x16=1024 loop */
+            alp = 8192;                          /* alp = 2.0 (Q12) */
+            nb_pulse = 4;
+            nbpos[0] = 4;
+            nbpos[1] = 8;
+            break;
+        case 36:                               /* 36 bits, 8 pulses, 4 tracks */
+            nbiter = 4;                          /* 4x20x16=1280 loop */
+            alp = 4096;                          /* alp = 1.0 (Q12) */
+            nb_pulse = 8;
+            nbpos[0] = 4;
+            nbpos[1] = 8;
+            nbpos[2] = 8;
+            break;
+        case 44:                               /* 44 bits, 10 pulses, 4 tracks */
+            nbiter = 4;                          /* 4x26x16=1664 loop */
+            alp = 4096;                          /* alp = 1.0 (Q12) */
+            nb_pulse = 10;
+            nbpos[0] = 4;
+            nbpos[1] = 6;
+            nbpos[2] = 8;
+            nbpos[3] = 8;
+            break;
+        case 52:                               /* 52 bits, 12 pulses, 4 tracks */
+            nbiter = 4;                          /* 4x26x16=1664 loop */
+            alp = 4096;                          /* alp = 1.0 (Q12) */
+            nb_pulse = 12;
+            nbpos[0] = 4;
+            nbpos[1] = 6;
+            nbpos[2] = 8;
+            nbpos[3] = 8;
+            break;
+        case 64:                               /* 64 bits, 16 pulses, 4 tracks */
+            nbiter = 3;                          /* 3x36x16=1728 loop */
+            alp = 3277;                          /* alp = 0.8 (Q12) */
+            nb_pulse = 16;
+            nbpos[0] = 4;
+            nbpos[1] = 4;
+            nbpos[2] = 6;
+            nbpos[3] = 6;
+            nbpos[4] = 8;
+            nbpos[5] = 8;
+            break;
+        case 72:                               /* 72 bits, 18 pulses, 4 tracks */
+            nbiter = 3;                          /* 3x35x16=1680 loop */
+            alp = 3072;                          /* alp = 0.75 (Q12) */
+            nb_pulse = 18;
+            nbpos[0] = 2;
+            nbpos[1] = 3;
+            nbpos[2] = 4;
+            nbpos[3] = 5;
+            nbpos[4] = 6;
+            nbpos[5] = 7;
+            nbpos[6] = 8;
+            break;
+        case 88:                               /* 88 bits, 24 pulses, 4 tracks */
+            if(ser_size > 462)
+                nbiter = 1;
+            else
+                nbiter = 2;                    /* 2x53x16=1696 loop */
 
-			alp = 2048;                          /* alp = 0.5 (Q12) */
-			nb_pulse = 24;
-			nbpos[0] = 2;
-			nbpos[1] = 2;
-			nbpos[2] = 3;
-			nbpos[3] = 4;
-			nbpos[4] = 5;
-			nbpos[5] = 6;
-			nbpos[6] = 7;
-			nbpos[7] = 8;
-			nbpos[8] = 8;
-			nbpos[9] = 8;
-			break;
-		default:
-			nbiter = 0;
-			alp = 0;
-			nb_pulse = 0;
-	}
+            alp = 2048;                          /* alp = 0.5 (Q12) */
+            nb_pulse = 24;
+            nbpos[0] = 2;
+            nbpos[1] = 2;
+            nbpos[2] = 3;
+            nbpos[3] = 4;
+            nbpos[4] = 5;
+            nbpos[5] = 6;
+            nbpos[6] = 7;
+            nbpos[7] = 8;
+            nbpos[8] = 8;
+            nbpos[9] = 8;
+            break;
+        default:
+            nbiter = 0;
+            alp = 0;
+            nb_pulse = 0;
+    }
 
-	for (i = 0; i < nb_pulse; i++)
-	{
-		codvec[i] = i;
-	}
+    for (i = 0; i < nb_pulse; i++)
+    {
+        codvec[i] = i;
+    }
 
-	/*----------------------------------------------------------------*
-	 * Find sign for each pulse position.                             *
-	 *----------------------------------------------------------------*/
-	/* calculate energy for normalization of cn[] and dn[] */
-	/* set k_cn = 32..32767 (ener_cn = 2^30..256-0) */
+    /*----------------------------------------------------------------*
+     * Find sign for each pulse position.                             *
+     *----------------------------------------------------------------*/
+    /* calculate energy for normalization of cn[] and dn[] */
+    /* set k_cn = 32..32767 (ener_cn = 2^30..256-0) */
 #ifdef ASM_OPT                  /* asm optimization branch */
-	s = Dot_product12_asm(cn, cn, L_SUBFR, &exp);
+    s = Dot_product12_asm(cn, cn, L_SUBFR, &exp);
 #else
-	s = Dot_product12(cn, cn, L_SUBFR, &exp);
+    s = Dot_product12(cn, cn, L_SUBFR, &exp);
 #endif
 
-	Isqrt_n(&s, &exp);
-	s = L_shl(s, (exp + 5));
-	k_cn = extract_h(L_add(s, 0x8000));
+    Isqrt_n(&s, &exp);
+    s = L_shl(s, (exp + 5));
+    k_cn = extract_h(L_add(s, 0x8000));
 
-	/* set k_dn = 32..512 (ener_dn = 2^30..2^22) */
+    /* set k_dn = 32..512 (ener_dn = 2^30..2^22) */
 #ifdef ASM_OPT                      /* asm optimization branch */
-	s = Dot_product12_asm(dn, dn, L_SUBFR, &exp);
+    s = Dot_product12_asm(dn, dn, L_SUBFR, &exp);
 #else
-	s = Dot_product12(dn, dn, L_SUBFR, &exp);
+    s = Dot_product12(dn, dn, L_SUBFR, &exp);
 #endif
 
-	Isqrt_n(&s, &exp);
-	k_dn = (L_shl(s, (exp + 5 + 3)) + 0x8000) >> 16;    /* k_dn = 256..4096 */
-	k_dn = vo_mult_r(alp, k_dn);              /* alp in Q12 */
+    Isqrt_n(&s, &exp);
+    k_dn = voround(L_shl(s, (exp + 5 + 3)));    /* k_dn = 256..4096 */
+    k_dn = vo_mult_r(alp, k_dn);              /* alp in Q12 */
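
Read schematically, the two gains above put both inputs on a common scale before mixing: k_cn is approximately 2^20/||cn|| (clipped to 32..32767), and after the Q12 multiply by alp, k_dn is approximately alp * 2^20/||dn||. A floating-point sketch of that intent (an approximation; the Dot_product12/Isqrt_n rounding and saturation are omitted):

    #include <math.h>
    double ener_cn = 0.0, ener_dn = 0.0;
    int n;
    for (n = 0; n < L_SUBFR; n++) {
        ener_cn += (double)cn[n] * cn[n];
        ener_dn += (double)dn[n] * dn[n];
    }
    double k_cn_f = 1048576.0 / sqrt(ener_cn);                  /* ~2^20 / ||cn|| */
    double k_dn_f = (alp / 4096.0) * 1048576.0 / sqrt(ener_dn); /* alp is Q12     */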
 
-	/* mix normalized cn[] and dn[] */
-	p0 = cn;
-	p1 = dn;
-	p2 = dn2;
+    /* mix normalized cn[] and dn[] */
+    p0 = cn;
+    p1 = dn;
+    p2 = dn2;
 
-	for (i = 0; i < L_SUBFR/4; i++)
-	{
-		s = (k_cn* (*p0++))+(k_dn * (*p1++));
-		*p2++ = s >> 7;
-		s = (k_cn* (*p0++))+(k_dn * (*p1++));
-		*p2++ = s >> 7;
-		s = (k_cn* (*p0++))+(k_dn * (*p1++));
-		*p2++ = s >> 7;
-		s = (k_cn* (*p0++))+(k_dn * (*p1++));
-		*p2++ = s >> 7;
-	}
+    for (i = 0; i < L_SUBFR/4; i++)
+    {
+        s = L_add((k_cn* (*p0++)), (k_dn * (*p1++)));
+        *p2++ = s >> 7;
+        s = L_add((k_cn* (*p0++)), (k_dn * (*p1++)));
+        *p2++ = s >> 7;
+        s = L_add((k_cn* (*p0++)), (k_dn * (*p1++)));
+        *p2++ = s >> 7;
+        s = L_add((k_cn* (*p0++)), (k_dn * (*p1++)));
+        *p2++ = s >> 7;
+    }
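
The only functional change in this loop is += becoming L_add: the basic operator saturates on 32-bit overflow instead of wrapping, and wrapping signed arithmetic is undefined behaviour in C. A minimal stand-in, assuming the usual ETSI/ITU basic-op semantics and a 32-bit Word32:

    #include <stdint.h>
    static int32_t L_add_sketch(int32_t a, int32_t b)
    {
        int64_t s = (int64_t)a + b;          /* widen, then clamp */
        if (s > INT32_MAX) return INT32_MAX;
        if (s < INT32_MIN) return INT32_MIN;
        return (int32_t)s;
    }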
 
-	/* set sign according to dn2[] = k_cn*cn[] + k_dn*dn[]    */
-	for(i = 0; i < L_SUBFR; i++)
-	{
-		val = dn[i];
-		ps = dn2[i];
-		if (ps >= 0)
-		{
-			sign[i] = 32767;             /* sign = +1 (Q12) */
-			vec[i] = -32768;
-		} else
-		{
-			sign[i] = -32768;            /* sign = -1 (Q12) */
-			vec[i] = 32767;
-			dn[i] = -val;
-			dn2[i] = -ps;
-		}
-	}
-	/*----------------------------------------------------------------*
-	 * Select NB_MAX position per track according to max of dn2[].    *
-	 *----------------------------------------------------------------*/
-	pos = 0;
-	for (i = 0; i < NB_TRACK; i++)
-	{
-		for (k = 0; k < NB_MAX; k++)
-		{
-			ps = -1;
-			for (j = i; j < L_SUBFR; j += STEP)
-			{
-				if(dn2[j] > ps)
-				{
-					ps = dn2[j];
-					pos = j;
-				}
-			}
-			dn2[pos] = (k - NB_MAX);     /* dn2 < 0 when position is selected */
-			if (k == 0)
-			{
-				pos_max[i] = pos;
-			}
-		}
-	}
+    /* set sign according to dn2[] = k_cn*cn[] + k_dn*dn[]    */
+    for(i = 0; i < L_SUBFR; i++)
+    {
+        val = dn[i];
+        ps = dn2[i];
+        if (ps >= 0)
+        {
+            sign[i] = 32767;             /* sign = +1 (Q12) */
+            vec[i] = -32768;
+        } else
+        {
+            sign[i] = -32768;            /* sign = -1 (Q12) */
+            vec[i] = 32767;
+            dn[i] = -val;
+            dn2[i] = -ps;
+        }
+    }
+    /*----------------------------------------------------------------*
+     * Select NB_MAX position per track according to max of dn2[].    *
+     *----------------------------------------------------------------*/
+    pos = 0;
+    for (i = 0; i < NB_TRACK; i++)
+    {
+        for (k = 0; k < NB_MAX; k++)
+        {
+            ps = -1;
+            for (j = i; j < L_SUBFR; j += STEP)
+            {
+                if(dn2[j] > ps)
+                {
+                    ps = dn2[j];
+                    pos = j;
+                }
+            }
+            dn2[pos] = (k - NB_MAX);     /* dn2 < 0 when position is selected */
+            if (k == 0)
+            {
+                pos_max[i] = pos;
+            }
+        }
+    }
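
The preselection above keeps NB_MAX candidates per track by repeatedly extracting the track maximum of dn2[] and overwriting it with k - NB_MAX, a strictly negative marker; since the preceding sign pass left every entry non-negative, a chosen position can never win again, and the k == 0 winner of each track is remembered in pos_max[]. A condensed sketch, assuming STEP == 4:

    Word32 t, k, j, sel;
    Word16 best;
    for (t = 0; t < NB_TRACK; t++)
        for (k = 0; k < NB_MAX; k++)
        {
            best = -1;
            sel = t;                              /* track t owns t, t+4, t+8, ... */
            for (j = t; j < L_SUBFR; j += 4)
                if (dn2[j] > best) { best = dn2[j]; sel = j; }
            dn2[sel] = (Word16)(k - NB_MAX);      /* negative => already selected */
            if (k == 0) pos_max[t] = (Word16)sel;
        }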
 
-	/*--------------------------------------------------------------*
-	 * Scale h[] to avoid overflow and to get maximum of precision  *
-	 * on correlation.                                              *
-	 *                                                              *
-	 * Maximum of h[] (h[0]) is fixed to 2048 (MAX16 / 16).         *
-	 *  ==> This allow addition of 16 pulses without saturation.    *
-	 *                                                              *
-	 * Energy worst case (on resonant impulse response),            *
-	 * - energy of h[] is approximately MAX/16.                     *
-	 * - During search, the energy is divided by 8 to avoid         *
-	 *   overflow on "alp". (energy of h[] = MAX/128).              *
-	 *  ==> "alp" worst case detected is 22854 on sinusoidal wave.  *
-	 *--------------------------------------------------------------*/
+    /*--------------------------------------------------------------*
+     * Scale h[] to avoid overflow and to get maximum of precision  *
+     * on correlation.                                              *
+     *                                                              *
+     * Maximum of h[] (h[0]) is fixed to 2048 (MAX16 / 16).         *
+     *  ==> This allows addition of 16 pulses without saturation.   *
+     *                                                              *
+     * Energy worst case (on resonant impulse response),            *
+     * - energy of h[] is approximately MAX/16.                     *
+     * - During search, the energy is divided by 8 to avoid         *
+     *   overflow on "alp". (energy of h[] = MAX/128).              *
+     *  ==> "alp" worst case detected is 22854 on sinusoidal wave.  *
+     *--------------------------------------------------------------*/
 
-	/* impulse response buffer for fast computation */
+    /* impulse response buffer for fast computation */
 
-	h = h_buf;
-	h_inv = h_buf + (2 * L_SUBFR);
-	L_tmp = 0;
-	for (i = 0; i < L_SUBFR; i++)
-	{
-		*h++ = 0;
-		*h_inv++ = 0;
-		L_tmp += (H[i] * H[i]) << 1;
-	}
-	/* scale h[] down (/2) when energy of h[] is high with many pulses used */
-	val = extract_h(L_tmp);
-	h_shift = 0;
+    h = h_buf;
+    h_inv = h_buf + (2 * L_SUBFR);
+    L_tmp = 0;
+    for (i = 0; i < L_SUBFR; i++)
+    {
+        *h++ = 0;
+        *h_inv++ = 0;
+        L_tmp = L_add(L_tmp, (H[i] * H[i]) << 1);
+    }
+    /* scale h[] down (/2) when energy of h[] is high with many pulses used */
+    val = extract_h(L_tmp);
+    h_shift = 0;
 
-	if ((nb_pulse >= 12) && (val > 1024))
-	{
-		h_shift = 1;
-	}
-	p0 = H;
-	p1 = h;
-	p2 = h_inv;
+    if ((nb_pulse >= 12) && (val > 1024))
+    {
+        h_shift = 1;
+    }
+    p0 = H;
+    p1 = h;
+    p2 = h_inv;
 
-	for (i = 0; i < L_SUBFR/4; i++)
-	{
-		*p1 = *p0++ >> h_shift;
-		*p2++ = -(*p1++);
-		*p1 = *p0++ >> h_shift;
-		*p2++ = -(*p1++);
-		*p1 = *p0++ >> h_shift;
-		*p2++ = -(*p1++);
-		*p1 = *p0++ >> h_shift;
-		*p2++ = -(*p1++);
-	}
+    for (i = 0; i < L_SUBFR/4; i++)
+    {
+        *p1 = *p0++ >> h_shift;
+        *p2++ = -(*p1++);
+        *p1 = *p0++ >> h_shift;
+        *p2++ = -(*p1++);
+        *p1 = *p0++ >> h_shift;
+        *p2++ = -(*p1++);
+        *p1 = *p0++ >> h_shift;
+        *p2++ = -(*p1++);
+    }
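
h_buf is laid out as L_SUBFR zeros, then h[], then h_inv[] with h_inv[k] == -h[k]. The zero prefix makes later expressions such as h - ix safe to index from 0, and the negated copy turns each pulse's sign into a one-time pointer choice instead of a multiply inside the inner loops. The pattern the search code relies on, in sketch form:

    /* Filtered contribution of a single pulse at position ix (illustrative): */
    Word32 n;
    Word16 *base = (sign[ix] < 0) ? (h_inv - ix) : (h - ix);
    for (n = 0; n < L_SUBFR; n++)
        vec[n] = base[n];            /* == +/-h[n - ix]; reads zeros for n < ix */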
 
-	/*------------------------------------------------------------*
-	 * Compute rrixix[][] needed for the codebook search.         *
-	 * This algorithm compute impulse response energy of all      *
-	 * positions (16) in each track (4).       Total = 4x16 = 64. *
-	 *------------------------------------------------------------*/
+    /*------------------------------------------------------------*
+     * Compute rrixix[][] needed for the codebook search.         *
+     * This algorithm computes impulse response energy of all     *
+     * positions (16) in each track (4).       Total = 4x16 = 64. *
+     *------------------------------------------------------------*/
 
-	/* storage order --> i3i3, i2i2, i1i1, i0i0 */
+    /* storage order --> i3i3, i2i2, i1i1, i0i0 */
 
-	/* Init pointers to last position of rrixix[] */
-	p0 = &rrixix[0][NB_POS - 1];
-	p1 = &rrixix[1][NB_POS - 1];
-	p2 = &rrixix[2][NB_POS - 1];
-	p3 = &rrixix[3][NB_POS - 1];
+    /* Init pointers to last position of rrixix[] */
+    p0 = &rrixix[0][NB_POS - 1];
+    p1 = &rrixix[1][NB_POS - 1];
+    p2 = &rrixix[2][NB_POS - 1];
+    p3 = &rrixix[3][NB_POS - 1];
 
-	ptr_h1 = h;
-	cor = 0x00008000L;                             /* for rounding */
-	for (i = 0; i < NB_POS; i++)
-	{
-		cor += vo_L_mult((*ptr_h1), (*ptr_h1));
-		ptr_h1++;
-		*p3-- = extract_h(cor);
-		cor += vo_L_mult((*ptr_h1), (*ptr_h1));
-		ptr_h1++;
-		*p2-- = extract_h(cor);
-		cor += vo_L_mult((*ptr_h1), (*ptr_h1));
-		ptr_h1++;
-		*p1-- = extract_h(cor);
-		cor += vo_L_mult((*ptr_h1), (*ptr_h1));
-		ptr_h1++;
-		*p0-- = extract_h(cor);
-	}
+    ptr_h1 = h;
+    cor = 0x00008000L;                             /* for rounding */
+    for (i = 0; i < NB_POS; i++)
+    {
+        cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h1)));
+        ptr_h1++;
+        *p3-- = extract_h(cor);
+        cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h1)));
+        ptr_h1++;
+        *p2-- = extract_h(cor);
+        cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h1)));
+        ptr_h1++;
+        *p1-- = extract_h(cor);
+        cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h1)));
+        ptr_h1++;
+        *p0-- = extract_h(cor);
+    }
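
The backward pointer walk above fills all four tracks in a single O(L_SUBFR) pass; written out directly, rrixix[t][q] is (up to rounding) the energy of the part of the impulse response that a pulse at absolute position 4*q + t still sees inside the subframe. An equivalent O(N^2) direct form, for reference:

    Word32 t, q, k, e;
    for (t = 0; t < NB_TRACK; t++)
        for (q = 0; q < NB_POS; q++)
        {
            e = 0x00008000L;                      /* rounding bias */
            for (k = 0; (4 * q + t) + k < L_SUBFR; k++)
                e += (h[k] * h[k]) << 1;          /* vo_L_mult; h[] is pre-scaled,
                                                     so this sum cannot overflow */
            rrixix[t][q] = extract_h(e);
        }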
 
-	/*------------------------------------------------------------*
-	 * Compute rrixiy[][] needed for the codebook search.         *
-	 * This algorithm compute correlation between 2 pulses        *
-	 * (2 impulses responses) in 4 possible adjacents tracks.     *
-	 * (track 0-1, 1-2, 2-3 and 3-0).     Total = 4x16x16 = 1024. *
-	 *------------------------------------------------------------*/
+    /*------------------------------------------------------------*
+     * Compute rrixiy[][] needed for the codebook search.         *
+     * This algorithm computes correlation between 2 pulses       *
+     * (2 impulse responses) in 4 possible adjacent tracks.       *
+     * (track 0-1, 1-2, 2-3 and 3-0).     Total = 4x16x16 = 1024. *
+     *------------------------------------------------------------*/
 
-	/* storage order --> i2i3, i1i2, i0i1, i3i0 */
+    /* storage order --> i2i3, i1i2, i0i1, i3i0 */
 
-	pos = MSIZE - 1;
-	ptr_hf = h + 1;
+    pos = MSIZE - 1;
+    ptr_hf = h + 1;
 
-	for (k = 0; k < NB_POS; k++)
-	{
-		p3 = &rrixiy[2][pos];
-		p2 = &rrixiy[1][pos];
-		p1 = &rrixiy[0][pos];
-		p0 = &rrixiy[3][pos - NB_POS];
+    for (k = 0; k < NB_POS; k++)
+    {
+        p3 = &rrixiy[2][pos];
+        p2 = &rrixiy[1][pos];
+        p1 = &rrixiy[0][pos];
+        p0 = &rrixiy[3][pos - NB_POS];
 
-		cor = 0x00008000L;                   /* for rounding */
-		ptr_h1 = h;
-		ptr_h2 = ptr_hf;
+        cor = 0x00008000L;                   /* for rounding */
+        ptr_h1 = h;
+        ptr_h2 = ptr_hf;
 
-		for (i = k + 1; i < NB_POS; i++)
-		{
-			cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-			ptr_h1++;
-			ptr_h2++;
-			*p3 = extract_h(cor);
-			cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-			ptr_h1++;
-			ptr_h2++;
-			*p2 = extract_h(cor);
-			cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-			ptr_h1++;
-			ptr_h2++;
-			*p1 = extract_h(cor);
-			cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-			ptr_h1++;
-			ptr_h2++;
-			*p0 = extract_h(cor);
+        for (i = k + 1; i < NB_POS; i++)
+        {
+            cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+            ptr_h1++;
+            ptr_h2++;
+            *p3 = extract_h(cor);
+            cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+            ptr_h1++;
+            ptr_h2++;
+            *p2 = extract_h(cor);
+            cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+            ptr_h1++;
+            ptr_h2++;
+            *p1 = extract_h(cor);
+            cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+            ptr_h1++;
+            ptr_h2++;
+            *p0 = extract_h(cor);
 
-			p3 -= (NB_POS + 1);
-			p2 -= (NB_POS + 1);
-			p1 -= (NB_POS + 1);
-			p0 -= (NB_POS + 1);
-		}
-		cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-		ptr_h1++;
-		ptr_h2++;
-		*p3 = extract_h(cor);
-		cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-		ptr_h1++;
-		ptr_h2++;
-		*p2 = extract_h(cor);
-		cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-		ptr_h1++;
-		ptr_h2++;
-		*p1 = extract_h(cor);
+            p3 -= (NB_POS + 1);
+            p2 -= (NB_POS + 1);
+            p1 -= (NB_POS + 1);
+            p0 -= (NB_POS + 1);
+        }
+        cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+        ptr_h1++;
+        ptr_h2++;
+        *p3 = extract_h(cor);
+        cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+        ptr_h1++;
+        ptr_h2++;
+        *p2 = extract_h(cor);
+        cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+        ptr_h1++;
+        ptr_h2++;
+        *p1 = extract_h(cor);
 
-		pos -= NB_POS;
-		ptr_hf += STEP;
-	}
+        pos -= NB_POS;
+        ptr_hf += STEP;
+    }
 
-	/* storage order --> i3i0, i2i3, i1i2, i0i1 */
+    /* storage order --> i3i0, i2i3, i1i2, i0i1 */
 
-	pos = MSIZE - 1;
-	ptr_hf = h + 3;
+    pos = MSIZE - 1;
+    ptr_hf = h + 3;
 
-	for (k = 0; k < NB_POS; k++)
-	{
-		p3 = &rrixiy[3][pos];
-		p2 = &rrixiy[2][pos - 1];
-		p1 = &rrixiy[1][pos - 1];
-		p0 = &rrixiy[0][pos - 1];
+    for (k = 0; k < NB_POS; k++)
+    {
+        p3 = &rrixiy[3][pos];
+        p2 = &rrixiy[2][pos - 1];
+        p1 = &rrixiy[1][pos - 1];
+        p0 = &rrixiy[0][pos - 1];
 
-		cor = 0x00008000L;								/* for rounding */
-		ptr_h1 = h;
-		ptr_h2 = ptr_hf;
+        cor = 0x00008000L;                              /* for rounding */
+        ptr_h1 = h;
+        ptr_h2 = ptr_hf;
 
-		for (i = k + 1; i < NB_POS; i++)
-		{
-			cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-			ptr_h1++;
-			ptr_h2++;
-			*p3 = extract_h(cor);
-			cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-			ptr_h1++;
-			ptr_h2++;
-			*p2 = extract_h(cor);
-			cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-			ptr_h1++;
-			ptr_h2++;
-			*p1 = extract_h(cor);
-			cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-			ptr_h1++;
-			ptr_h2++;
-			*p0 = extract_h(cor);
+        for (i = k + 1; i < NB_POS; i++)
+        {
+            cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+            ptr_h1++;
+            ptr_h2++;
+            *p3 = extract_h(cor);
+            cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+            ptr_h1++;
+            ptr_h2++;
+            *p2 = extract_h(cor);
+            cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+            ptr_h1++;
+            ptr_h2++;
+            *p1 = extract_h(cor);
+            cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+            ptr_h1++;
+            ptr_h2++;
+            *p0 = extract_h(cor);
 
-			p3 -= (NB_POS + 1);
-			p2 -= (NB_POS + 1);
-			p1 -= (NB_POS + 1);
-			p0 -= (NB_POS + 1);
-		}
-		cor += vo_L_mult((*ptr_h1), (*ptr_h2));
-		ptr_h1++;
-		ptr_h2++;
-		*p3 = extract_h(cor);
+            p3 -= (NB_POS + 1);
+            p2 -= (NB_POS + 1);
+            p1 -= (NB_POS + 1);
+            p0 -= (NB_POS + 1);
+        }
+        cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
+        ptr_h1++;
+        ptr_h2++;
+        *p3 = extract_h(cor);
 
-		pos--;
-		ptr_hf += STEP;
-	}
+        pos--;
+        ptr_hf += STEP;
+    }
 
-	/*------------------------------------------------------------*
-	 * Modification of rrixiy[][] to take signs into account.     *
-	 *------------------------------------------------------------*/
+    /*------------------------------------------------------------*
+     * Modification of rrixiy[][] to take signs into account.     *
+     *------------------------------------------------------------*/
 
-	p0 = &rrixiy[0][0];
+    p0 = &rrixiy[0][0];
 
-	for (k = 0; k < NB_TRACK; k++)
-	{
-		j_temp = (k + 1)&0x03;
-		for (i = k; i < L_SUBFR; i += STEP)
-		{
-			psign = sign;
-			if (psign[i] < 0)
-			{
-				psign = vec;
-			}
-			j = j_temp;
-			for (; j < L_SUBFR; j += STEP)
-			{
-				*p0 = vo_mult(*p0, psign[j]);
-				p0++;
-			}
-		}
-	}
+    for (k = 0; k < NB_TRACK; k++)
+    {
+        j_temp = (k + 1)&0x03;
+        for (i = k; i < L_SUBFR; i += STEP)
+        {
+            psign = sign;
+            if (psign[i] < 0)
+            {
+                psign = vec;
+            }
+            j = j_temp;
+            for (; j < L_SUBFR; j += STEP)
+            {
+                *p0 = vo_mult(*p0, psign[j]);
+                p0++;
+            }
+        }
+    }
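
Folding the signs into rrixiy[][] here lets the later search loops use plain additions: multiplying a Q15 value by sign[j] == 32767 leaves it essentially unchanged, and by -32768 exactly negates it. A sketch of the Q15 multiply, assuming the standard saturating mult semantics and an arithmetic right shift on negatives:

    static Word16 mult_q15(Word16 a, Word16 b)    /* (a * b) >> 15, saturated */
    {
        Word32 p = ((Word32)a * b) >> 15;
        return (p > 32767) ? 32767 : (Word16)p;   /* only (-32768)*(-32768) clips */
    }
    /* mult_q15(x,  32767) ~= x  (off by at most 1 LSB)                */
    /* mult_q15(x, -32768) == -x (clamped to 32767 when x == -32768)   */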
 
-	/*-------------------------------------------------------------------*
-	 *                       Deep first search                           *
-	 *-------------------------------------------------------------------*/
+    /*-------------------------------------------------------------------*
+     *                       Depth-first search                          *
+     *-------------------------------------------------------------------*/
 
-	psk = -1;
-	alpk = 1;
+    psk = -1;
+    alpk = 1;
 
-	for (k = 0; k < nbiter; k++)
-	{
-		j_temp = k<<2;
-		for (i = 0; i < nb_pulse; i++)
-			ipos[i] = tipos[j_temp + i];
+    for (k = 0; k < nbiter; k++)
+    {
+        j_temp = k<<2;
+        for (i = 0; i < nb_pulse; i++)
+            ipos[i] = tipos[j_temp + i];
 
-		if(nbbits == 20)
-		{
-			pos = 0;
-			ps = 0;
-			alp = 0;
-			for (i = 0; i < L_SUBFR; i++)
-			{
-				vec[i] = 0;
-			}
-		} else if ((nbbits == 36) || (nbbits == 44))
-		{
-			/* first stage: fix 2 pulses */
-			pos = 2;
+        if(nbbits == 20)
+        {
+            pos = 0;
+            ps = 0;
+            alp = 0;
+            for (i = 0; i < L_SUBFR; i++)
+            {
+                vec[i] = 0;
+            }
+        } else if ((nbbits == 36) || (nbbits == 44))
+        {
+            /* first stage: fix 2 pulses */
+            pos = 2;
 
-			ix = ind[0] = pos_max[ipos[0]];
-			iy = ind[1] = pos_max[ipos[1]];
-			ps = dn[ix] + dn[iy];
-			i = ix >> 2;                /* ix / STEP */
-			j = iy >> 2;                /* iy / STEP */
-			s = rrixix[ipos[0]][i] << 13;
-			s += rrixix[ipos[1]][j] << 13;
-			i = (i << 4) + j;         /* (ix/STEP)*NB_POS + (iy/STEP) */
-			s += rrixiy[ipos[0]][i] << 14;
-			alp = (s + 0x8000) >> 16;
-			if (sign[ix] < 0)
-				p0 = h_inv - ix;
-			else
-				p0 = h - ix;
-			if (sign[iy] < 0)
-				p1 = h_inv - iy;
-			else
-				p1 = h - iy;
+            ix = ind[0] = pos_max[ipos[0]];
+            iy = ind[1] = pos_max[ipos[1]];
+            ps = dn[ix] + dn[iy];
+            i = ix >> 2;                /* ix / STEP */
+            j = iy >> 2;                /* iy / STEP */
+            s = rrixix[ipos[0]][i] << 13;
+            s += rrixix[ipos[1]][j] << 13;
+            i = (i << 4) + j;         /* (ix/STEP)*NB_POS + (iy/STEP) */
+            s += rrixiy[ipos[0]][i] << 14;
+            alp = (s + 0x8000) >> 16;
+            if (sign[ix] < 0)
+                p0 = h_inv - ix;
+            else
+                p0 = h - ix;
+            if (sign[iy] < 0)
+                p1 = h_inv - iy;
+            else
+                p1 = h - iy;
 
-			for (i = 0; i < L_SUBFR; i++)
-			{
-				vec[i] = (*p0++) + (*p1++);
-			}
+            for (i = 0; i < L_SUBFR; i++)
+            {
+                vec[i] = (*p0++) + (*p1++);
+            }
 
-			if(nbbits == 44)
-			{
-				ipos[8] = 0;
-				ipos[9] = 1;
-			}
-		} else
-		{
-			/* first stage: fix 4 pulses */
-			pos = 4;
+            if(nbbits == 44)
+            {
+                ipos[8] = 0;
+                ipos[9] = 1;
+            }
+        } else
+        {
+            /* first stage: fix 4 pulses */
+            pos = 4;
 
-			ix = ind[0] = pos_max[ipos[0]];
-			iy = ind[1] = pos_max[ipos[1]];
-			i = ind[2] = pos_max[ipos[2]];
-			j = ind[3] = pos_max[ipos[3]];
-			ps = add1(add1(add1(dn[ix], dn[iy]), dn[i]), dn[j]);
+            ix = ind[0] = pos_max[ipos[0]];
+            iy = ind[1] = pos_max[ipos[1]];
+            i = ind[2] = pos_max[ipos[2]];
+            j = ind[3] = pos_max[ipos[3]];
+            ps = add1(add1(add1(dn[ix], dn[iy]), dn[i]), dn[j]);
 
-			if (sign[ix] < 0)
-				p0 = h_inv - ix;
-			else
-				p0 = h - ix;
+            if (sign[ix] < 0)
+                p0 = h_inv - ix;
+            else
+                p0 = h - ix;
 
-			if (sign[iy] < 0)
-				p1 = h_inv - iy;
-			else
-				p1 = h - iy;
+            if (sign[iy] < 0)
+                p1 = h_inv - iy;
+            else
+                p1 = h - iy;
 
-			if (sign[i] < 0)
-				p2 = h_inv - i;
-			else
-				p2 = h - i;
+            if (sign[i] < 0)
+                p2 = h_inv - i;
+            else
+                p2 = h - i;
 
-			if (sign[j] < 0)
-				p3 = h_inv - j;
-			else
-				p3 = h - j;
+            if (sign[j] < 0)
+                p3 = h_inv - j;
+            else
+                p3 = h - j;
 
-			L_tmp = 0L;
-			for(i = 0; i < L_SUBFR; i++)
-			{
-				vec[i]  = add1(add1(add1(*p0++, *p1++), *p2++), *p3++);
-				L_tmp  += (vec[i] * vec[i]) << 1;
-			}
+            L_tmp = 0L;
+            for(i = 0; i < L_SUBFR; i++)
+            {
+                Word32 vecSq2;
+                vec[i]  = add1(add1(add1(*p0++, *p1++), *p2++), *p3++);
+                vecSq2 = (vec[i] * vec[i]) << 1;
+                if (vecSq2 > 0 && L_tmp > INT_MAX - vecSq2) {
+                    L_tmp = INT_MAX;
+                } else if (vecSq2 < 0 && L_tmp < INT_MIN - vecSq2) {
+                    L_tmp = INT_MIN;
+                } else {
+                    L_tmp  += vecSq2;
+                }
+            }
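
This guarded accumulation is the substantive fix in the hunk: the old L_tmp += (vec[i] * vec[i]) << 1 could overflow Word32, which is undefined behaviour for signed integers, while the new code pins the sum at INT_MAX/INT_MIN (so <limits.h> is assumed to be in scope for this file). Three samples at about 0.7 of full scale already exhaust the 32-bit range:

    int32_t sq  = (23170 * 23170) << 1;   /* 1073697800, i.e. ~0.5 in Q31 */
    int32_t sum = sq + sq;                /* 2147395600, still fits       */
    /* sum + sq would exceed INT32_MAX and wrap; the guarded loop above
     * saturates the accumulator at INT_MAX instead. */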
 
-			alp = ((L_tmp >> 3) + 0x8000) >> 16;
+            alp = ((L_tmp >> 3) + 0x8000) >> 16;
 
-			if(nbbits == 72)
-			{
-				ipos[16] = 0;
-				ipos[17] = 1;
-			}
-		}
+            if(nbbits == 72)
+            {
+                ipos[16] = 0;
+                ipos[17] = 1;
+            }
+        }
 
-		/* other stages of 2 pulses */
+        /* other stages of 2 pulses */
 
-		for (j = pos, st = 0; j < nb_pulse; j += 2, st++)
-		{
-			/*--------------------------------------------------*
-			 * Calculate correlation of all possible positions  *
-			 * of the next 2 pulses with previous fixed pulses. *
-			 * Each pulse can have 16 possible positions.       *
-			 *--------------------------------------------------*/
-			if(ipos[j] == 3)
-			{
-				cor_h_vec_30(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
-			}
-			else
-			{
+        for (j = pos, st = 0; j < nb_pulse; j += 2, st++)
+        {
+            /*--------------------------------------------------*
+             * Calculate correlation of all possible positions  *
+             * of the next 2 pulses with previous fixed pulses. *
+             * Each pulse can have 16 possible positions.       *
+             *--------------------------------------------------*/
+            if(ipos[j] == 3)
+            {
+                cor_h_vec_30(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
+            }
+            else
+            {
 #ifdef ASM_OPT                 /* asm optimization branch */
-				cor_h_vec_012_asm(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
+                cor_h_vec_012_asm(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
 #else
-				cor_h_vec_012(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
+                cor_h_vec_012(h, vec, ipos[j], sign, rrixix, cor_x, cor_y);
 #endif
-			}
-			/*--------------------------------------------------*
-			 * Find best positions of 2 pulses.                 *
-			 *--------------------------------------------------*/
-			search_ixiy(nbpos[st], ipos[j], ipos[j + 1], &ps, &alp,
-					&ix, &iy, dn, dn2, cor_x, cor_y, rrixiy);
+            }
+            /*--------------------------------------------------*
+             * Find best positions of 2 pulses.                 *
+             *--------------------------------------------------*/
+            search_ixiy(nbpos[st], ipos[j], ipos[j + 1], &ps, &alp,
+                    &ix, &iy, dn, dn2, cor_x, cor_y, rrixiy);
 
-			ind[j] = ix;
-			ind[j + 1] = iy;
+            ind[j] = ix;
+            ind[j + 1] = iy;
 
-			if (sign[ix] < 0)
-				p0 = h_inv - ix;
-			else
-				p0 = h - ix;
-			if (sign[iy] < 0)
-				p1 = h_inv - iy;
-			else
-				p1 = h - iy;
+            if (sign[ix] < 0)
+                p0 = h_inv - ix;
+            else
+                p0 = h - ix;
+            if (sign[iy] < 0)
+                p1 = h_inv - iy;
+            else
+                p1 = h - iy;
 
-			for (i = 0; i < L_SUBFR; i+=4)
-			{
-				vec[i]   += add1((*p0++), (*p1++));
-				vec[i+1] += add1((*p0++), (*p1++));
-				vec[i+2] += add1((*p0++), (*p1++));
-				vec[i+3] += add1((*p0++), (*p1++));
-			}
-		}
-		/* memorise the best codevector */
-		ps = vo_mult(ps, ps);
-		s = vo_L_msu(vo_L_mult(alpk, ps), psk, alp);
-		if (s > 0)
-		{
-			psk = ps;
-			alpk = alp;
-			for (i = 0; i < nb_pulse; i++)
-			{
-				codvec[i] = ind[i];
-			}
-			for (i = 0; i < L_SUBFR; i++)
-			{
-				y[i] = vec[i];
-			}
-		}
-	}
-	/*-------------------------------------------------------------------*
-	 * Build the codeword, the filtered codeword and index of codevector.*
-	 *-------------------------------------------------------------------*/
-	for (i = 0; i < NPMAXPT * NB_TRACK; i++)
-	{
-		ind[i] = -1;
-	}
-	for (i = 0; i < L_SUBFR; i++)
-	{
-		code[i] = 0;
-		y[i] = vo_shr_r(y[i], 3);               /* Q12 to Q9 */
-	}
-	val = (512 >> h_shift);               /* codeword in Q9 format */
-	for (k = 0; k < nb_pulse; k++)
-	{
-		i = codvec[k];                       /* read pulse position */
-		j = sign[i];                         /* read sign           */
-		index = i >> 2;                 /* index = pos of pulse (0..15) */
-		track = (Word16) (i & 0x03);         /* track = i % NB_TRACK (0..3)  */
+            for (i = 0; i < L_SUBFR; i+=4)
+            {
+                vec[i]   += add1((*p0++), (*p1++));
+                vec[i+1] += add1((*p0++), (*p1++));
+                vec[i+2] += add1((*p0++), (*p1++));
+                vec[i+3] += add1((*p0++), (*p1++));
+            }
+        }
+        /* memorise the best codevector */
+        ps = vo_mult(ps, ps);
+        s = L_sub(vo_L_mult(alpk, ps), vo_L_mult(psk, alp));
+        if (s > 0)
+        {
+            psk = ps;
+            alpk = alp;
+            for (i = 0; i < nb_pulse; i++)
+            {
+                codvec[i] = ind[i];
+            }
+            for (i = 0; i < L_SUBFR; i++)
+            {
+                y[i] = vec[i];
+            }
+        }
+    }
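
The codevector test above uses the same divide-free comparison as the 2-pulse coder (s > 0 exactly when alpk*ps exceeds psk*alp, with ps holding the squared correlation), but the fused vo_L_msu has been split into an L_sub of two products so that the final subtraction saturates instead of wrapping on extreme inputs. A stand-in for the saturating subtract, under the usual basic-op assumptions:

    #include <stdint.h>
    static int32_t L_sub_sketch(int32_t a, int32_t b)
    {
        int64_t d = (int64_t)a - b;          /* widen, then clamp */
        if (d > INT32_MAX) return INT32_MAX;
        if (d < INT32_MIN) return INT32_MIN;
        return (int32_t)d;
    }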
+    /*-------------------------------------------------------------------*
+     * Build the codeword, the filtered codeword and index of codevector.*
+     *-------------------------------------------------------------------*/
+    for (i = 0; i < NPMAXPT * NB_TRACK; i++)
+    {
+        ind[i] = -1;
+    }
+    for (i = 0; i < L_SUBFR; i++)
+    {
+        code[i] = 0;
+        y[i] = vo_shr_r(y[i], 3);               /* Q12 to Q9 */
+    }
+    val = (512 >> h_shift);               /* codeword in Q9 format */
+    for (k = 0; k < nb_pulse; k++)
+    {
+        i = codvec[k];                       /* read pulse position */
+        j = sign[i];                         /* read sign           */
+        index = i >> 2;                 /* index = pos of pulse (0..15) */
+        track = (Word16) (i & 0x03);         /* track = i % NB_TRACK (0..3)  */
 
-		if (j > 0)
-		{
-			code[i] += val;
-			codvec[k] += 128;
-		} else
-		{
-			code[i] -= val;
-			index += NB_POS;
-		}
+        if (j > 0)
+        {
+            code[i] += val;
+            codvec[k] += 128;
+        } else
+        {
+            code[i] -= val;
+            index += NB_POS;
+        }
 
-		i = (Word16)((vo_L_mult(track, NPMAXPT) >> 1));
+        i = (Word16)((vo_L_mult(track, NPMAXPT) >> 1));
 
-		while (ind[i] >= 0)
-		{
-			i += 1;
-		}
-		ind[i] = index;
-	}
+        while (ind[i] >= 0)
+        {
+            i += 1;
+        }
+        ind[i] = index;
+    }
 
-	k = 0;
-	/* Build index of codevector */
-	if(nbbits == 20)
-	{
-		for (track = 0; track < NB_TRACK; track++)
-		{
-			_index[track] = (Word16)(quant_1p_N1(ind[k], 4));
-			k += NPMAXPT;
-		}
-	} else if(nbbits == 36)
-	{
-		for (track = 0; track < NB_TRACK; track++)
-		{
-			_index[track] = (Word16)(quant_2p_2N1(ind[k], ind[k + 1], 4));
-			k += NPMAXPT;
-		}
-	} else if(nbbits == 44)
-	{
-		for (track = 0; track < NB_TRACK - 2; track++)
-		{
-			_index[track] = (Word16)(quant_3p_3N1(ind[k], ind[k + 1], ind[k + 2], 4));
-			k += NPMAXPT;
-		}
-		for (track = 2; track < NB_TRACK; track++)
-		{
-			_index[track] = (Word16)(quant_2p_2N1(ind[k], ind[k + 1], 4));
-			k += NPMAXPT;
-		}
-	} else if(nbbits == 52)
-	{
-		for (track = 0; track < NB_TRACK; track++)
-		{
-			_index[track] = (Word16)(quant_3p_3N1(ind[k], ind[k + 1], ind[k + 2], 4));
-			k += NPMAXPT;
-		}
-	} else if(nbbits == 64)
-	{
-		for (track = 0; track < NB_TRACK; track++)
-		{
-			L_index = quant_4p_4N(&ind[k], 4);
-			_index[track] = (Word16)((L_index >> 14) & 3);
-			_index[track + NB_TRACK] = (Word16)(L_index & 0x3FFF);
-			k += NPMAXPT;
-		}
-	} else if(nbbits == 72)
-	{
-		for (track = 0; track < NB_TRACK - 2; track++)
-		{
-			L_index = quant_5p_5N(&ind[k], 4);
-			_index[track] = (Word16)((L_index >> 10) & 0x03FF);
-			_index[track + NB_TRACK] = (Word16)(L_index & 0x03FF);
-			k += NPMAXPT;
-		}
-		for (track = 2; track < NB_TRACK; track++)
-		{
-			L_index = quant_4p_4N(&ind[k], 4);
-			_index[track] = (Word16)((L_index >> 14) & 3);
-			_index[track + NB_TRACK] = (Word16)(L_index & 0x3FFF);
-			k += NPMAXPT;
-		}
-	} else if(nbbits == 88)
-	{
-		for (track = 0; track < NB_TRACK; track++)
-		{
-			L_index = quant_6p_6N_2(&ind[k], 4);
-			_index[track] = (Word16)((L_index >> 11) & 0x07FF);
-			_index[track + NB_TRACK] = (Word16)(L_index & 0x07FF);
-			k += NPMAXPT;
-		}
-	}
-	return;
+    k = 0;
+    /* Build index of codevector */
+    if(nbbits == 20)
+    {
+        for (track = 0; track < NB_TRACK; track++)
+        {
+            _index[track] = (Word16)(quant_1p_N1(ind[k], 4));
+            k += NPMAXPT;
+        }
+    } else if(nbbits == 36)
+    {
+        for (track = 0; track < NB_TRACK; track++)
+        {
+            _index[track] = (Word16)(quant_2p_2N1(ind[k], ind[k + 1], 4));
+            k += NPMAXPT;
+        }
+    } else if(nbbits == 44)
+    {
+        for (track = 0; track < NB_TRACK - 2; track++)
+        {
+            _index[track] = (Word16)(quant_3p_3N1(ind[k], ind[k + 1], ind[k + 2], 4));
+            k += NPMAXPT;
+        }
+        for (track = 2; track < NB_TRACK; track++)
+        {
+            _index[track] = (Word16)(quant_2p_2N1(ind[k], ind[k + 1], 4));
+            k += NPMAXPT;
+        }
+    } else if(nbbits == 52)
+    {
+        for (track = 0; track < NB_TRACK; track++)
+        {
+            _index[track] = (Word16)(quant_3p_3N1(ind[k], ind[k + 1], ind[k + 2], 4));
+            k += NPMAXPT;
+        }
+    } else if(nbbits == 64)
+    {
+        for (track = 0; track < NB_TRACK; track++)
+        {
+            L_index = quant_4p_4N(&ind[k], 4);
+            _index[track] = (Word16)((L_index >> 14) & 3);
+            _index[track + NB_TRACK] = (Word16)(L_index & 0x3FFF);
+            k += NPMAXPT;
+        }
+    } else if(nbbits == 72)
+    {
+        for (track = 0; track < NB_TRACK - 2; track++)
+        {
+            L_index = quant_5p_5N(&ind[k], 4);
+            _index[track] = (Word16)((L_index >> 10) & 0x03FF);
+            _index[track + NB_TRACK] = (Word16)(L_index & 0x03FF);
+            k += NPMAXPT;
+        }
+        for (track = 2; track < NB_TRACK; track++)
+        {
+            L_index = quant_4p_4N(&ind[k], 4);
+            _index[track] = (Word16)((L_index >> 14) & 3);
+            _index[track + NB_TRACK] = (Word16)(L_index & 0x3FFF);
+            k += NPMAXPT;
+        }
+    } else if(nbbits == 88)
+    {
+        for (track = 0; track < NB_TRACK; track++)
+        {
+            L_index = quant_6p_6N_2(&ind[k], 4);
+            _index[track] = (Word16)((L_index >> 11) & 0x07FF);
+            _index[track + NB_TRACK] = (Word16)(L_index & 0x07FF);
+            k += NPMAXPT;
+        }
+    }
+    return;
 }
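
A note on the packing in the 64-, 72- and 88-bit branches above: each track's quantizer returns more than 14 bits there, so the value is split between _index[track] (high bits) and _index[track + NB_TRACK] (low bits). A minimal standalone sketch of the 64-bit case's 2+14-bit split; the types and masks mirror the code, the sample value is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    typedef int16_t Word16;
    typedef int32_t Word32;

    int main(void)
    {
        Word32 L_index = 0x9ABC;                         /* hypothetical 16-bit quantizer output */
        Word16 hi = (Word16)((L_index >> 14) & 3);       /* top 2 bits     */
        Word16 lo = (Word16)(L_index & 0x3FFF);          /* bottom 14 bits */
        Word32 back = ((Word32)hi << 14) | (Word32)lo;   /* reassemble     */
        printf("hi=%d lo=%d roundtrip %s\n", hi, lo, back == L_index ? "ok" : "FAIL");
        return 0;
    }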
 
 
@@ -824,135 +832,135 @@
  * Compute correlations of h[] with vec[] for the specified track.   *
  *-------------------------------------------------------------------*/
 void cor_h_vec_30(
-		Word16 h[],                           /* (i) scaled impulse response                 */
-		Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
-		Word16 track,                         /* (i) track to use                            */
-		Word16 sign[],                        /* (i) sign vector                             */
-		Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
-		Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
-		Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
-		)
+        Word16 h[],                           /* (i) scaled impulse response                 */
+        Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
+        Word16 track,                         /* (i) track to use                            */
+        Word16 sign[],                        /* (i) sign vector                             */
+        Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
+        Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
+        Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
+        )
 {
-	Word32 i, j, pos, corr;
-	Word16 *p0, *p1, *p2,*p3,*cor_x,*cor_y;
-	Word32 L_sum1,L_sum2;
-	cor_x = cor_1;
-	cor_y = cor_2;
-	p0 = rrixix[track];
-	p3 = rrixix[0];
-	pos = track;
+    Word32 i, j, pos, corr;
+    Word16 *p0, *p1, *p2,*p3,*cor_x,*cor_y;
+    Word32 L_sum1,L_sum2;
+    cor_x = cor_1;
+    cor_y = cor_2;
+    p0 = rrixix[track];
+    p3 = rrixix[0];
+    pos = track;
 
-	for (i = 0; i < NB_POS; i+=2)
-	{
-		L_sum1 = L_sum2 = 0L;
-		p1 = h;
-		p2 = &vec[pos];
-		for (j=pos;j < L_SUBFR; j++)
-		{
-			L_sum1 += *p1 * *p2;
-			p2-=3;
-			L_sum2 += *p1++ * *p2;
-			p2+=4;
-		}
-		p2-=3;
-		L_sum2 += *p1++ * *p2++;
-		L_sum2 += *p1++ * *p2++;
-		L_sum2 += *p1++ * *p2++;
+    for (i = 0; i < NB_POS; i+=2)
+    {
+        L_sum1 = L_sum2 = 0L;
+        p1 = h;
+        p2 = &vec[pos];
+        for (j=pos;j < L_SUBFR; j++)
+        {
+            L_sum1 = L_add(L_sum1, *p1 * *p2);
+            p2-=3;
+            L_sum2 = L_add(L_sum2, *p1++ * *p2);
+            p2+=4;
+        }
+        p2-=3;
+        L_sum2 = L_add(L_sum2, *p1++ * *p2++);
+        L_sum2 = L_add(L_sum2, *p1++ * *p2++);
+        L_sum2 = L_add(L_sum2, *p1++ * *p2++);
 
-		L_sum1 = (L_sum1 << 2);
-		L_sum2 = (L_sum2 << 2);
+        L_sum1 = L_shl(L_sum1, 2);
+        L_sum2 = L_shl(L_sum2, 2);
 
-		corr = vo_round(L_sum1);
-		*cor_x++ = vo_mult(corr, sign[pos]) + (*p0++);
-		corr = vo_round(L_sum2);
-		*cor_y++ = vo_mult(corr, sign[pos-3]) + (*p3++);
-		pos += STEP;
+        corr = voround(L_sum1);
+        *cor_x++ = mult(corr, sign[pos]) + (*p0++);
+        corr = voround(L_sum2);
+        *cor_y++ = mult(corr, sign[pos-3]) + (*p3++);
+        pos += STEP;
 
-		L_sum1 = L_sum2 = 0L;
-		p1 = h;
-		p2 = &vec[pos];
-		for (j=pos;j < L_SUBFR; j++)
-		{
-			L_sum1 += *p1 * *p2;
-			p2-=3;
-			L_sum2 += *p1++ * *p2;
-			p2+=4;
-		}
-		p2-=3;
-		L_sum2 += *p1++ * *p2++;
-		L_sum2 += *p1++ * *p2++;
-		L_sum2 += *p1++ * *p2++;
+        L_sum1 = L_sum2 = 0L;
+        p1 = h;
+        p2 = &vec[pos];
+        for (j=pos;j < L_SUBFR; j++)
+        {
+            L_sum1 = L_add(L_sum1, *p1 * *p2);
+            p2-=3;
+            L_sum2 = L_add(L_sum2, *p1++ * *p2);
+            p2+=4;
+        }
+        p2-=3;
+        L_sum2 = L_add(L_sum2, *p1++ * *p2++);
+        L_sum2 = L_add(L_sum2, *p1++ * *p2++);
+        L_sum2 = L_add(L_sum2, *p1++ * *p2++);
 
-		L_sum1 = (L_sum1 << 2);
-		L_sum2 = (L_sum2 << 2);
+        L_sum1 = L_shl(L_sum1, 2);
+        L_sum2 = L_shl(L_sum2, 2);
 
-		corr = vo_round(L_sum1);
-		*cor_x++ = vo_mult(corr, sign[pos]) + (*p0++);
-		corr = vo_round(L_sum2);
-		*cor_y++ = vo_mult(corr, sign[pos-3]) + (*p3++);
-		pos += STEP;
-	}
-	return;
+        corr = voround(L_sum1);
+        *cor_x++ = mult(corr, sign[pos]) + (*p0++);
+        corr = voround(L_sum2);
+        *cor_y++ = mult(corr, sign[pos-3]) + (*p3++);
+        pos += STEP;
+    }
+    return;
 }
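
The substantive change in this hunk, beyond the re-indent, is routing every accumulation and shift through the codec's saturating basic ops. The real L_add/L_shl live in basic_op.h; the stand-ins below are simplified sketches of the behaviour the patch relies on, not the codec's definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the saturating basic ops (sketches only). */
    static int32_t sat_L_add(int32_t a, int32_t b)
    {
        int64_t s = (int64_t)a + b;              /* widen, then clamp */
        if (s > INT32_MAX) return INT32_MAX;
        if (s < INT32_MIN) return INT32_MIN;
        return (int32_t)s;
    }

    static int32_t sat_L_shl(int32_t a, int n)
    {
        int64_t s = (int64_t)a << n;             /* widen, then clamp */
        if (s > INT32_MAX) return INT32_MAX;
        if (s < INT32_MIN) return INT32_MIN;
        return (int32_t)s;
    }

    int main(void)
    {
        int32_t big = INT32_MAX - 10;
        printf("%d\n", sat_L_add(big, 100));   /* clamps to 2147483647 */
        printf("%d\n", sat_L_shl(big, 2));     /* clamps instead of overflowing */
        return 0;
    }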
 
 void cor_h_vec_012(
-		Word16 h[],                           /* (i) scaled impulse response                 */
-		Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
-		Word16 track,                         /* (i) track to use                            */
-		Word16 sign[],                        /* (i) sign vector                             */
-		Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
-		Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
-		Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
-		)
+        Word16 h[],                           /* (i) scaled impulse response                 */
+        Word16 vec[],                         /* (i) scaled vector (/8) to correlate with h[] */
+        Word16 track,                         /* (i) track to use                            */
+        Word16 sign[],                        /* (i) sign vector                             */
+        Word16 rrixix[][NB_POS],              /* (i) correlation of h[x] with h[x]      */
+        Word16 cor_1[],                       /* (o) result of correlation (NB_POS elements) */
+        Word16 cor_2[]                        /* (o) result of correlation (NB_POS elements) */
+        )
 {
-	Word32 i, j, pos, corr;
-	Word16 *p0, *p1, *p2,*p3,*cor_x,*cor_y;
-	Word32 L_sum1,L_sum2;
-	cor_x = cor_1;
-	cor_y = cor_2;
-	p0 = rrixix[track];
-	p3 = rrixix[track+1];
-	pos = track;
+    Word32 i, j, pos, corr;
+    Word16 *p0, *p1, *p2,*p3,*cor_x,*cor_y;
+    Word32 L_sum1,L_sum2;
+    cor_x = cor_1;
+    cor_y = cor_2;
+    p0 = rrixix[track];
+    p3 = rrixix[track+1];
+    pos = track;
 
-	for (i = 0; i < NB_POS; i+=2)
-	{
-		L_sum1 = L_sum2 = 0L;
-		p1 = h;
-		p2 = &vec[pos];
-		for (j=62-pos ;j >= 0; j--)
-		{
-			L_sum1 += *p1 * *p2++;
-			L_sum2 += *p1++ * *p2;
-		}
-		L_sum1 += *p1 * *p2;
-		L_sum1 = (L_sum1 << 2);
-		L_sum2 = (L_sum2 << 2);
+    for (i = 0; i < NB_POS; i+=2)
+    {
+        L_sum1 = L_sum2 = 0L;
+        p1 = h;
+        p2 = &vec[pos];
+        for (j=62-pos ;j >= 0; j--)
+        {
+            L_sum1 = L_add(L_sum1, *p1 * *p2++);
+            L_sum2 = L_add(L_sum2, *p1++ * *p2);
+        }
+        L_sum1 = L_add(L_sum1, *p1 * *p2);
+        L_sum1 = L_shl(L_sum1, 2);
+        L_sum2 = L_shl(L_sum2, 2);
 
-		corr = (L_sum1 + 0x8000) >> 16;
-		cor_x[i] = vo_mult(corr, sign[pos]) + (*p0++);
-		corr = (L_sum2 + 0x8000) >> 16;
-		cor_y[i] = vo_mult(corr, sign[pos + 1]) + (*p3++);
-		pos += STEP;
+        corr = voround(L_sum1);
+        cor_x[i] = vo_mult(corr, sign[pos]) + (*p0++);
+        corr = voround(L_sum2);
+        cor_y[i] = vo_mult(corr, sign[pos + 1]) + (*p3++);
+        pos += STEP;
 
-		L_sum1 = L_sum2 = 0L;
-		p1 = h;
-		p2 = &vec[pos];
-		for (j= 62-pos;j >= 0; j--)
-		{
-			L_sum1 += *p1 * *p2++;
-			L_sum2 += *p1++ * *p2;
-		}
-		L_sum1 += *p1 * *p2;
-		L_sum1 = (L_sum1 << 2);
-		L_sum2 = (L_sum2 << 2);
+        L_sum1 = L_sum2 = 0L;
+        p1 = h;
+        p2 = &vec[pos];
+        for (j= 62-pos;j >= 0; j--)
+        {
+            L_sum1 = L_add(L_sum1, *p1 * *p2++);
+            L_sum2 = L_add(L_sum2, *p1++ * *p2);
+        }
+        L_sum1 = L_add(L_sum1, *p1 * *p2);
+        L_sum1 = L_shl(L_sum1, 2);
+        L_sum2 = L_shl(L_sum2, 2);
 
-		corr = (L_sum1 + 0x8000) >> 16;
-		cor_x[i+1] = vo_mult(corr, sign[pos]) + (*p0++);
-		corr = (L_sum2 + 0x8000) >> 16;
-		cor_y[i+1] = vo_mult(corr, sign[pos + 1]) + (*p3++);
-		pos += STEP;
-	}
-	return;
+        corr = voround(L_sum1);
+        cor_x[i+1] = vo_mult(corr, sign[pos]) + (*p0++);
+        corr = voround(L_sum2);
+        cor_y[i+1] = vo_mult(corr, sign[pos + 1]) + (*p3++);
+        pos += STEP;
+    }
+    return;
 }
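
cor_h_vec_012 gets the same treatment, plus voround() in place of the open-coded (L_sum + 0x8000) >> 16, which wraps when L_sum is within 0x8000 of INT32_MAX. A sketch of the saturating rounding (a stand-in, not the basic_op.h definition):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: round a Q31 value into Q15 with saturation, as voround() does. */
    static int16_t sketch_voround(int32_t x)
    {
        int64_t r = (int64_t)x + 0x8000;     /* rounding offset */
        if (r > INT32_MAX) r = INT32_MAX;    /* the raw form would wrap here */
        return (int16_t)(r >> 16);
    }

    int main(void)
    {
        printf("%d\n", sketch_voround(INT32_MAX - 100));  /* 32767, no wrap */
        printf("%d\n", sketch_voround(0x12348000));       /* 0x1235 = 4661  */
        return 0;
    }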
 
 /*-------------------------------------------------------------------*
@@ -962,80 +970,80 @@
  *-------------------------------------------------------------------*/
 
 void search_ixiy(
-		Word16 nb_pos_ix,                     /* (i) nb of pos for pulse 1 (1..8)       */
-		Word16 track_x,                       /* (i) track of pulse 1                   */
-		Word16 track_y,                       /* (i) track of pulse 2                   */
-		Word16 * ps,                          /* (i/o) correlation of all fixed pulses  */
-		Word16 * alp,                         /* (i/o) energy of all fixed pulses       */
-		Word16 * ix,                          /* (o) position of pulse 1                */
-		Word16 * iy,                          /* (o) position of pulse 2                */
-		Word16 dn[],                          /* (i) corr. between target and h[]       */
-		Word16 dn2[],                         /* (i) vector of selected positions       */
-		Word16 cor_x[],                       /* (i) corr. of pulse 1 with fixed pulses */
-		Word16 cor_y[],                       /* (i) corr. of pulse 2 with fixed pulses */
-		Word16 rrixiy[][MSIZE]                /* (i) corr. of pulse 1 with pulse 2   */
-		)
+        Word16 nb_pos_ix,                     /* (i) nb of pos for pulse 1 (1..8)       */
+        Word16 track_x,                       /* (i) track of pulse 1                   */
+        Word16 track_y,                       /* (i) track of pulse 2                   */
+        Word16 * ps,                          /* (i/o) correlation of all fixed pulses  */
+        Word16 * alp,                         /* (i/o) energy of all fixed pulses       */
+        Word16 * ix,                          /* (o) position of pulse 1                */
+        Word16 * iy,                          /* (o) position of pulse 2                */
+        Word16 dn[],                          /* (i) corr. between target and h[]       */
+        Word16 dn2[],                         /* (i) vector of selected positions       */
+        Word16 cor_x[],                       /* (i) corr. of pulse 1 with fixed pulses */
+        Word16 cor_y[],                       /* (i) corr. of pulse 2 with fixed pulses */
+        Word16 rrixiy[][MSIZE]                /* (i) corr. of pulse 1 with pulse 2   */
+        )
 {
-	Word32 x, y, pos, thres_ix;
-	Word16 ps1, ps2, sq, sqk;
-	Word16 alp_16, alpk;
-	Word16 *p0, *p1, *p2;
-	Word32 s, alp0, alp1, alp2;
+    Word32 x, y, pos, thres_ix;
+    Word16 ps1, ps2, sq, sqk;
+    Word16 alp_16, alpk;
+    Word16 *p0, *p1, *p2;
+    Word32 s, alp0, alp1, alp2;
 
-	p0 = cor_x;
-	p1 = cor_y;
-	p2 = rrixiy[track_x];
+    p0 = cor_x;
+    p1 = cor_y;
+    p2 = rrixiy[track_x];
 
-	thres_ix = nb_pos_ix - NB_MAX;
+    thres_ix = nb_pos_ix - NB_MAX;
 
-	alp0 = L_deposit_h(*alp);
-	alp0 = (alp0 + 0x00008000L);       /* for rounding */
+    alp0 = L_deposit_h(*alp);
+    alp0 = (alp0 + 0x00008000L);       /* for rounding */
 
-	sqk = -1;
-	alpk = 1;
+    sqk = -1;
+    alpk = 1;
 
-	for (x = track_x; x < L_SUBFR; x += STEP)
-	{
-		ps1 = *ps + dn[x];
-		alp1 = alp0 + ((*p0++)<<13);
+    for (x = track_x; x < L_SUBFR; x += STEP)
+    {
+        ps1 = *ps + dn[x];
+        alp1 = L_add(alp0, ((*p0++)<<13));
 
-		if (dn2[x] < thres_ix)
-		{
-			pos = -1;
-			for (y = track_y; y < L_SUBFR; y += STEP)
-			{
-				ps2 = add1(ps1, dn[y]);
+        if (dn2[x] < thres_ix)
+        {
+            pos = -1;
+            for (y = track_y; y < L_SUBFR; y += STEP)
+            {
+                ps2 = add1(ps1, dn[y]);
 
-				alp2 = alp1 + ((*p1++)<<13);
-				alp2 = alp2 + ((*p2++)<<14);
-				alp_16 = extract_h(alp2);
-				sq = vo_mult(ps2, ps2);
-				s = vo_L_mult(alpk, sq) - ((sqk * alp_16)<<1);
+                alp2 = L_add(alp1, ((*p1++)<<13));
+                alp2 = L_add(alp2, ((*p2++)<<14));
+                alp_16 = extract_h(alp2);
+                sq = vo_mult(ps2, ps2);
+                s = L_sub(vo_L_mult(alpk, sq), L_mult(sqk, alp_16));
 
-				if (s > 0)
-				{
-					sqk = sq;
-					alpk = alp_16;
-					pos = y;
-				}
-			}
-			p1 -= NB_POS;
+                if (s > 0)
+                {
+                    sqk = sq;
+                    alpk = alp_16;
+                    pos = y;
+                }
+            }
+            p1 -= NB_POS;
 
-			if (pos >= 0)
-			{
-				*ix = x;
-				*iy = pos;
-			}
-		} else
-		{
-			p2 += NB_POS;
-		}
-	}
+            if (pos >= 0)
+            {
+                *ix = x;
+                *iy = pos;
+            }
+        } else
+        {
+            p2 += NB_POS;
+        }
+    }
 
-	*ps = add1(*ps, add1(dn[*ix], dn[*iy]));
-	*alp = alpk;
+    *ps = add1(*ps, add1(dn[*ix], dn[*iy]));
+    *alp = alpk;
 
-	return;
+    return;
 }
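
For context on the s = L_sub(...) change: the search keeps the best ratio sq/alp seen so far as the pair (sqk, alpk) and compares candidates by cross-multiplication, so no division is needed; the patch simply makes both products saturating. A hypothetical plain-integer version of the same test:

    #include <stdint.h>
    #include <stdio.h>

    /* sq/alp > sqk/alpk  <=>  alpk*sq - sqk*alp > 0   (both denominators > 0) */
    static int better(int32_t sq, int32_t alp, int32_t sqk, int32_t alpk)
    {
        return (int64_t)alpk * sq - (int64_t)sqk * alp > 0;  /* widened, no wrap */
    }

    int main(void)
    {
        printf("%d\n", better(9, 4, 7, 4));   /* 9/4 > 7/4 -> 1 */
        printf("%d\n", better(3, 4, 7, 4));   /* 3/4 < 7/4 -> 0 */
        return 0;
    }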
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/convolve.c b/media/libstagefright/codecs/amrwbenc/src/convolve.c
index 4c1f7d4..8c24414 100644
--- a/media/libstagefright/codecs/amrwbenc/src/convolve.c
+++ b/media/libstagefright/codecs/amrwbenc/src/convolve.c
@@ -17,8 +17,8 @@
 /***********************************************************************
        File: convolve.c
 
-	   Description:Perform the convolution between two vectors x[] and h[]
-	               and write the result in the vector y[]
+       Description:Perform the convolution between two vectors x[] and h[]
+                   and write the result in the vector y[]
 
 ************************************************************************/
 
@@ -28,85 +28,90 @@
 #define UNUSED(x) (void)(x)
 
 void Convolve (
-		Word16 x[],        /* (i)     : input vector                           */
-		Word16 h[],        /* (i)     : impulse response                       */
-		Word16 y[],        /* (o)     : output vector                          */
-		Word16 L           /* (i)     : vector size                            */
-	      )
+        Word16 x[],        /* (i)     : input vector                           */
+        Word16 h[],        /* (i)     : impulse response                       */
+        Word16 y[],        /* (o)     : output vector                          */
+        Word16 L           /* (i)     : vector size                            */
+          )
 {
-	Word32  i, n;
-	Word16 *tmpH,*tmpX;
-	Word32 s;
+    Word32  i, n;
+    Word16 *tmpH,*tmpX;
+    Word32 s;
         UNUSED(L);
 
-	for (n = 0; n < 64;)
-	{
-		tmpH = h+n;
-		tmpX = x;
-		i=n+1;
-		s = vo_mult32((*tmpX++), (*tmpH--));i--;
-		while(i>0)
-		{
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			i -= 4;
-		}
-		y[n] = ((s<<1) + 0x8000)>>16;
-		n++;
+    for (n = 0; n < 64;)
+    {
+        tmpH = h+n;
+        tmpX = x;
+        i=n+1;
+        s = vo_mult32((*tmpX++), (*tmpH--));i--;
+        while(i>0)
+        {
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            i -= 4;
+        }
+        y[n] = voround(L_shl(s, 1));
+        n++;
 
-		tmpH = h+n;
-		tmpX = x;
-		i=n+1;
-		s =  vo_mult32((*tmpX++), (*tmpH--));i--;
-		s += vo_mult32((*tmpX++), (*tmpH--));i--;
+        tmpH = h+n;
+        tmpX = x;
+        i=n+1;
+        s =  vo_mult32((*tmpX++), (*tmpH--));
+        i--;
+        s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+        i--;
 
-		while(i>0)
-		{
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			i -= 4;
-		}
-		y[n] = ((s<<1) + 0x8000)>>16;
-		n++;
+        while(i>0)
+        {
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            i -= 4;
+        }
+        y[n] = voround(L_shl(s, 1));
+        n++;
 
-		tmpH = h+n;
-		tmpX = x;
-		i=n+1;
-		s =  vo_mult32((*tmpX++), (*tmpH--));i--;
-		s += vo_mult32((*tmpX++), (*tmpH--));i--;
-		s += vo_mult32((*tmpX++), (*tmpH--));i--;
+        tmpH = h+n;
+        tmpX = x;
+        i=n+1;
+        s =  vo_mult32((*tmpX++), (*tmpH--));
+        i--;
+        s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+        i--;
+        s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+        i--;
 
-		while(i>0)
-		{
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			i -= 4;
-		}
-		y[n] = ((s<<1) + 0x8000)>>16;
-		n++;
+        while(i>0)
+        {
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            i -= 4;
+        }
+        y[n] = voround(L_shl(s, 1));
+        n++;
 
-		s = 0;
-		tmpH = h+n;
-		tmpX = x;
-		i=n+1;
-		while(i>0)
-		{
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			s += vo_mult32((*tmpX++), (*tmpH--));
-			i -= 4;
-		}
-		y[n] = ((s<<1) + 0x8000)>>16;
-		n++;
-	}
-	return;
+        s = 0;
+        tmpH = h+n;
+        tmpX = x;
+        i=n+1;
+        while(i>0)
+        {
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            s = L_add(s, vo_mult32((*tmpX++), (*tmpH--)));
+            i -= 4;
+        }
+        y[n] = voround(L_shl(s, 1));
+        n++;
+    }
+    return;
 }
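
The unrolled loops above compute a plain causal convolution; the four-way split only exists so the inner loops stay multiples of four. A non-unrolled reference of the same computation, assuming the same scaling (products summed, doubled, then rounded into the top 16 bits); this is a sketch, with a wide accumulator standing in for the saturating chain:

    #include <stdint.h>

    /* Reference: y[n] = round_Q15( 2 * sum_{i=0..n} x[i] * h[n-i] ) */
    static void convolve_ref(const int16_t *x, const int16_t *h, int16_t *y, int L)
    {
        for (int n = 0; n < L; n++) {
            int64_t s = 0;
            for (int i = 0; i <= n; i++)
                s += (int32_t)x[i] * h[n - i];
            s = (s << 1) + 0x8000;                 /* double and round */
            if (s > INT32_MAX) s = INT32_MAX;      /* mimic saturation */
            if (s < INT32_MIN) s = INT32_MIN;
            y[n] = (int16_t)(s >> 16);
        }
    }

    int main(void)
    {
        int16_t x[4] = {16384, 0, 0, 0}, h[4] = {8192, 4096, 0, 0}, y[4];
        convolve_ref(x, h, y, 4);
        return y[0] == 4096 ? 0 : 1;   /* 0.5 * 0.25 in Q15 terms */
    }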
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c b/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
index d9245ed..e834396 100644
--- a/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
+++ b/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
@@ -17,10 +17,10 @@
 /***********************************************************************
 *       File: cor_h_x.c                                                *
 *                                                                      *
-*	   Description:Compute correlation between target "x[]" and "h[]"  *
-*	               Designed for codebook search (24 pulses, 4 tracks,  *
-*				   4 pulses per track, 16 positions in each track) to  *
-*				   avoid saturation.                                   *
+*      Description:Compute correlation between target "x[]" and "h[]"  *
+*                  Designed for codebook search (24 pulses, 4 tracks,  *
+*                  4 pulses per track, 16 positions in each track) to  *
+*                  avoid saturation.                                   *
 *                                                                      *
 ************************************************************************/
 
@@ -33,94 +33,100 @@
 #define STEP      4
 
 void cor_h_x(
-		Word16 h[],                           /* (i) Q12 : impulse response of weighted synthesis filter */
-		Word16 x[],                           /* (i) Q0  : target vector                                 */
-		Word16 dn[]                           /* (o) <12bit : correlation between target and h[]         */
-	    )
+        Word16 h[],                           /* (i) Q12 : impulse response of weighted synthesis filter */
+        Word16 x[],                           /* (i) Q0  : target vector                                 */
+        Word16 dn[]                           /* (o) <12bit : correlation between target and h[]         */
+        )
 {
-	Word32 i, j;
-	Word32 L_tmp, y32[L_SUBFR], L_tot;
-	Word16 *p1, *p2;
-	Word32 *p3;
-	Word32 L_max, L_max1, L_max2, L_max3;
-	/* first keep the result on 32 bits and find absolute maximum */
-	L_tot  = 1;
-	L_max  = 0;
-	L_max1 = 0;
-	L_max2 = 0;
-	L_max3 = 0;
-	for (i = 0; i < L_SUBFR; i += STEP)
-	{
-		L_tmp = 1;                                    /* 1 -> to avoid null dn[] */
-		p1 = &x[i];
-		p2 = &h[0];
-		for (j = i; j < L_SUBFR; j++)
-			L_tmp += vo_L_mult(*p1++, *p2++);
+    Word32 i, j;
+    Word32 L_tmp, y32[L_SUBFR], L_tot;
+    Word16 *p1, *p2;
+    Word32 *p3;
+    Word32 L_max, L_max1, L_max2, L_max3;
+    /* first keep the result on 32 bits and find absolute maximum */
+    L_tot  = 1;
+    L_max  = 0;
+    L_max1 = 0;
+    L_max2 = 0;
+    L_max3 = 0;
+    for (i = 0; i < L_SUBFR; i += STEP)
+    {
+        L_tmp = 1;                                    /* 1 -> to avoid null dn[] */
+        p1 = &x[i];
+        p2 = &h[0];
+        for (j = i; j < L_SUBFR; j++)
+            L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
 
-		y32[i] = L_tmp;
-		L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
-		if(L_tmp > L_max)
-		{
-			L_max = L_tmp;
-		}
+        y32[i] = L_tmp;
+        L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
+        if(L_tmp > L_max)
+        {
+            L_max = L_tmp;
+        }
 
-		L_tmp = 1L;
-		p1 = &x[i+1];
-		p2 = &h[0];
-		for (j = i+1; j < L_SUBFR; j++)
-			L_tmp += vo_L_mult(*p1++, *p2++);
+        L_tmp = 1L;
+        p1 = &x[i+1];
+        p2 = &h[0];
+        for (j = i+1; j < L_SUBFR; j++)
+            L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
 
-		y32[i+1] = L_tmp;
-		L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
-		if(L_tmp > L_max1)
-		{
-			L_max1 = L_tmp;
-		}
+        y32[i+1] = L_tmp;
+        L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
+        if(L_tmp > L_max1)
+        {
+            L_max1 = L_tmp;
+        }
 
-		L_tmp = 1;
-		p1 = &x[i+2];
-		p2 = &h[0];
-		for (j = i+2; j < L_SUBFR; j++)
-			L_tmp += vo_L_mult(*p1++, *p2++);
+        L_tmp = 1;
+        p1 = &x[i+2];
+        p2 = &h[0];
+        for (j = i+2; j < L_SUBFR; j++)
+            L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
 
-		y32[i+2] = L_tmp;
-		L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
-		if(L_tmp > L_max2)
-		{
-			L_max2 = L_tmp;
-		}
+        y32[i+2] = L_tmp;
+        L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
+        if(L_tmp > L_max2)
+        {
+            L_max2 = L_tmp;
+        }
 
-		L_tmp = 1;
-		p1 = &x[i+3];
-		p2 = &h[0];
-		for (j = i+3; j < L_SUBFR; j++)
-			L_tmp += vo_L_mult(*p1++, *p2++);
+        L_tmp = 1;
+        p1 = &x[i+3];
+        p2 = &h[0];
+        for (j = i+3; j < L_SUBFR; j++)
+            L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
 
-		y32[i+3] = L_tmp;
-		L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
-		if(L_tmp > L_max3)
-		{
-			L_max3 = L_tmp;
-		}
-	}
-	/* tot += 3*max / 8 */
-	L_max = ((L_max + L_max1 + L_max2 + L_max3) >> 2);
-	L_tot = vo_L_add(L_tot, L_max);       /* +max/4 */
-	L_tot = vo_L_add(L_tot, (L_max >> 1));  /* +max/8 */
+        y32[i+3] = L_tmp;
+        L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
+        if(L_tmp > L_max3)
+        {
+            L_max3 = L_tmp;
+        }
+    }
+    /* tot += 3*max / 8 */
+    if (L_max > INT_MAX - L_max1 ||
+            L_max + L_max1 > INT_MAX - L_max2 ||
+            L_max + L_max1 + L_max2 > INT_MAX - L_max3) {
+        L_max = INT_MAX >> 2;
+    } else {
+        L_max = ((L_max + L_max1 + L_max2 + L_max3) >> 2);
+    }
+    L_tot = vo_L_add(L_tot, L_max);       /* +max/4 */
+    L_tot = vo_L_add(L_tot, (L_max >> 1));  /* +max/8 */
 
-	/* Find the number of right shifts to do on y32[] so that    */
-	/* 6.0 x sumation of max of dn[] in each track not saturate. */
-	j = norm_l(L_tot) - 4;             /* 4 -> 16 x tot */
-	p1 = dn;
-	p3 = y32;
-	for (i = 0; i < L_SUBFR; i+=4)
-	{
-		*p1++ = vo_round(L_shl(*p3++, j));
-		*p1++ = vo_round(L_shl(*p3++, j));
-		*p1++ = vo_round(L_shl(*p3++, j));
-		*p1++ = vo_round(L_shl(*p3++, j));
-	}
-	return;
+    /* Find the number of right shifts to do on y32[] so that    */
+    /* 6.0 x summation of max of dn[] in each track does not saturate. */
+    j = norm_l(L_tot) - 4;             /* 4 -> 16 x tot */
+    p1 = dn;
+    p3 = y32;
+    for (i = 0; i < L_SUBFR; i+=4)
+    {
+        *p1++ = vo_round(L_shl(*p3++, j));
+        *p1++ = vo_round(L_shl(*p3++, j));
+        *p1++ = vo_round(L_shl(*p3++, j));
+        *p1++ = vo_round(L_shl(*p3++, j));
+    }
+    return;
 }
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/decim54.c b/media/libstagefright/codecs/amrwbenc/src/decim54.c
index 3b88514..e4c7940 100644
--- a/media/libstagefright/codecs/amrwbenc/src/decim54.c
+++ b/media/libstagefright/codecs/amrwbenc/src/decim54.c
@@ -17,7 +17,7 @@
 /***********************************************************************
 *      File: decim54.c                                                 *
 *                                                                      *
-*	   Description:Decimation of 16kHz signal to 12.8kHz           *
+*      Description:Decimation of 16kHz signal to 12.8kHz               *
 *                                                                      *
 ************************************************************************/
 
@@ -33,114 +33,114 @@
 
 /* Local functions */
 static void Down_samp(
-		Word16 * sig,                         /* input:  signal to downsampling  */
-		Word16 * sig_d,                       /* output: downsampled signal      */
-		Word16 L_frame_d                      /* input:  length of output        */
-		);
+        Word16 * sig,                         /* input:  signal to downsampling  */
+        Word16 * sig_d,                       /* output: downsampled signal      */
+        Word16 L_frame_d                      /* input:  length of output        */
+        );
 
 /* 1/5 resolution interpolation filter  (in Q14)  */
 /* -1.5dB @ 6kHz, -6dB @ 6.4kHz, -10dB @ 6.6kHz, -20dB @ 6.9kHz, -25dB @ 7kHz, -55dB @ 8kHz */
 
 static Word16 fir_down1[4][30] =
 {
-	{-5, 24, -50, 54, 0, -128, 294, -408, 344, 0, -647, 1505, -2379, 3034, 13107, 3034, -2379, 1505, -647, 0, 344, -408,
-	294, -128, 0, 54, -50, 24, -5, 0},
+    {-5, 24, -50, 54, 0, -128, 294, -408, 344, 0, -647, 1505, -2379, 3034, 13107, 3034, -2379, 1505, -647, 0, 344, -408,
+    294, -128, 0, 54, -50, 24, -5, 0},
 
-	{-6, 19, -26, 0, 77, -188, 270, -233, 0, 434, -964, 1366, -1293, 0, 12254, 6575, -2746, 1030, 0, -507, 601, -441,
-	198, 0, -95, 99, -58, 18, 0, -1},
+    {-6, 19, -26, 0, 77, -188, 270, -233, 0, 434, -964, 1366, -1293, 0, 12254, 6575, -2746, 1030, 0, -507, 601, -441,
+    198, 0, -95, 99, -58, 18, 0, -1},
 
-	{-3, 9, 0, -41, 111, -170, 153, 0, -295, 649, -888, 770, 0, -1997, 9894, 9894, -1997, 0, 770, -888, 649, -295, 0,
-	153, -170, 111, -41, 0, 9, -3},
+    {-3, 9, 0, -41, 111, -170, 153, 0, -295, 649, -888, 770, 0, -1997, 9894, 9894, -1997, 0, 770, -888, 649, -295, 0,
+    153, -170, 111, -41, 0, 9, -3},
 
-	{-1, 0, 18, -58, 99, -95, 0, 198, -441, 601, -507, 0, 1030, -2746, 6575, 12254, 0, -1293, 1366, -964, 434, 0,
-	-233, 270, -188, 77, 0, -26, 19, -6}
+    {-1, 0, 18, -58, 99, -95, 0, 198, -441, 601, -507, 0, 1030, -2746, 6575, 12254, 0, -1293, 1366, -964, 434, 0,
+    -233, 270, -188, 77, 0, -26, 19, -6}
 };
 
 void Init_Decim_12k8(
-		Word16 mem[]                          /* output: memory (2*NB_COEF_DOWN) set to zeros */
-		)
+        Word16 mem[]                          /* output: memory (2*NB_COEF_DOWN) set to zeros */
+        )
 {
-	Set_zero(mem, 2 * NB_COEF_DOWN);
-	return;
+    Set_zero(mem, 2 * NB_COEF_DOWN);
+    return;
 }
 
 void Decim_12k8(
-		Word16 sig16k[],                      /* input:  signal to downsampling  */
-		Word16 lg,                            /* input:  length of input         */
-		Word16 sig12k8[],                     /* output: decimated signal        */
-		Word16 mem[]                          /* in/out: memory (2*NB_COEF_DOWN) */
-	       )
+        Word16 sig16k[],                      /* input:  signal to downsampling  */
+        Word16 lg,                            /* input:  length of input         */
+        Word16 sig12k8[],                     /* output: decimated signal        */
+        Word16 mem[]                          /* in/out: memory (2*NB_COEF_DOWN) */
+           )
 {
-	Word16 lg_down;
-	Word16 signal[L_FRAME16k + (2 * NB_COEF_DOWN)];
+    Word16 lg_down;
+    Word16 signal[L_FRAME16k + (2 * NB_COEF_DOWN)];
 
-	Copy(mem, signal, 2 * NB_COEF_DOWN);
+    Copy(mem, signal, 2 * NB_COEF_DOWN);
 
-	Copy(sig16k, signal + (2 * NB_COEF_DOWN), lg);
+    Copy(sig16k, signal + (2 * NB_COEF_DOWN), lg);
 
-	lg_down = (lg * DOWN_FAC)>>15;
+    lg_down = (lg * DOWN_FAC)>>15;
 
-	Down_samp(signal + NB_COEF_DOWN, sig12k8, lg_down);
+    Down_samp(signal + NB_COEF_DOWN, sig12k8, lg_down);
 
-	Copy(signal + lg, mem, 2 * NB_COEF_DOWN);
+    Copy(signal + lg, mem, 2 * NB_COEF_DOWN);
 
-	return;
+    return;
 }
 
 static void Down_samp(
-		Word16 * sig,                         /* input:  signal to downsampling  */
-		Word16 * sig_d,                       /* output: downsampled signal      */
-		Word16 L_frame_d                      /* input:  length of output        */
-		)
+        Word16 * sig,                         /* input:  signal to downsampling  */
+        Word16 * sig_d,                       /* output: downsampled signal      */
+        Word16 L_frame_d                      /* input:  length of output        */
+        )
 {
-	Word32 i, j, frac, pos;
-	Word16 *x, *y;
-	Word32 L_sum;
+    Word32 i, j, frac, pos;
+    Word16 *x, *y;
+    Word32 L_sum;
 
-	pos = 0;                                 /* position is in Q2 -> 1/4 resolution  */
-	for (j = 0; j < L_frame_d; j++)
-	{
-		i = (pos >> 2);                   /* integer part     */
-		frac = pos & 3;                   /* fractional part */
-		x = sig + i - NB_COEF_DOWN + 1;
-		y = (Word16 *)(fir_down1 + frac);
+    pos = 0;                                 /* position is in Q2 -> 1/4 resolution  */
+    for (j = 0; j < L_frame_d; j++)
+    {
+        i = (pos >> 2);                   /* integer part     */
+        frac = pos & 3;                   /* fractional part */
+        x = sig + i - NB_COEF_DOWN + 1;
+        y = (Word16 *)(fir_down1 + frac);
 
-		L_sum = vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x++),(*y++));
-		L_sum += vo_mult32((*x),(*y));
+        L_sum = vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x++),(*y++));
+        L_sum += vo_mult32((*x),(*y));
 
-		L_sum = L_shl2(L_sum, 2);
-		sig_d[j] = extract_h(L_add(L_sum, 0x8000));
-		pos += FAC5;              /* pos + 5/4 */
-	}
-	return;
+        L_sum = L_shl2(L_sum, 2);
+        sig_d[j] = extract_h(L_add(L_sum, 0x8000));
+        pos += FAC5;              /* pos + 5/4 */
+    }
+    return;
 }
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/deemph.c b/media/libstagefright/codecs/amrwbenc/src/deemph.c
index 0c49d6b..cc27f6e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/deemph.c
+++ b/media/libstagefright/codecs/amrwbenc/src/deemph.c
@@ -17,9 +17,9 @@
 /***********************************************************************
 *       File: deemph.c                                                 *
 *                                                                      *
-*	   Description:filtering through 1/(1-mu z^ -1)                    *
-*	               Deemph2 --> signal is divided by 2                  *
-*				   Deemph_32 --> for 32 bits signal.                   *
+*      Description:filtering through 1/(1-mu z^-1)                     *
+*                  Deemph2 --> signal is divided by 2                  *
+*                  Deemph_32 --> for 32 bits signal.                   *
 *                                                                      *
 ************************************************************************/
 
@@ -28,89 +28,92 @@
 #include "math_op.h"
 
 void Deemph(
-		Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
-		Word16 mu,                            /* (i) Q15 : deemphasis factor                      */
-		Word16 L,                             /* (i)     : vector size                            */
-		Word16 * mem                          /* (i/o)   : memory (y[-1])                         */
-	   )
+        Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
+        Word16 mu,                            /* (i) Q15 : deemphasis factor                      */
+        Word16 L,                             /* (i)     : vector size                            */
+        Word16 * mem                          /* (i/o)   : memory (y[-1])                         */
+       )
 {
-	Word32 i;
-	Word32 L_tmp;
+    Word32 i;
+    Word32 L_tmp;
 
-	L_tmp = L_deposit_h(x[0]);
-	L_tmp = L_mac(L_tmp, *mem, mu);
-	x[0] = vo_round(L_tmp);
+    L_tmp = L_deposit_h(x[0]);
+    L_tmp = L_mac(L_tmp, *mem, mu);
+    x[0] = vo_round(L_tmp);
 
-	for (i = 1; i < L; i++)
-	{
-		L_tmp = L_deposit_h(x[i]);
-		L_tmp = L_mac(L_tmp, x[i - 1], mu);
-		x[i] = voround(L_tmp);
-	}
+    for (i = 1; i < L; i++)
+    {
+        L_tmp = L_deposit_h(x[i]);
+        L_tmp = L_mac(L_tmp, x[i - 1], mu);
+        x[i] = voround(L_tmp);
+    }
 
-	*mem = x[L - 1];
+    *mem = x[L - 1];
 
-	return;
+    return;
 }
 
 
 void Deemph2(
-		Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
-		Word16 mu,                            /* (i) Q15 : deemphasis factor                      */
-		Word16 L,                             /* (i)     : vector size                            */
-		Word16 * mem                          /* (i/o)   : memory (y[-1])                         */
-	    )
+        Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
+        Word16 mu,                            /* (i) Q15 : deemphasis factor                      */
+        Word16 L,                             /* (i)     : vector size                            */
+        Word16 * mem                          /* (i/o)   : memory (y[-1])                         */
+        )
 {
-	Word32 i;
-	Word32 L_tmp;
-	L_tmp = x[0] << 15;
-	L_tmp += ((*mem) * mu)<<1;
-	x[0] = (L_tmp + 0x8000)>>16;
-	for (i = 1; i < L; i++)
-	{
-		L_tmp = x[i] << 15;
-		L_tmp += (x[i - 1] * mu)<<1;
-		x[i] = (L_tmp + 0x8000)>>16;
-	}
-	*mem = x[L - 1];
-	return;
+    Word32 i;
+    Word32 L_tmp;
+    L_tmp = x[0] << 15;
+    i = L_mult(*mem, mu);
+    L_tmp = L_add(L_tmp, i);
+    x[0] = voround(L_tmp);
+    for (i = 1; i < L; i++)
+    {
+        Word32 tmp;
+        L_tmp = x[i] << 15;
+        tmp = (x[i - 1] * mu)<<1;
+        L_tmp = L_add(L_tmp, tmp);
+        x[i] = voround(L_tmp);
+    }
+    *mem = x[L - 1];
+    return;
 }
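
The substantive fix in Deemph2 is the first output sample: the old ((*mem) * mu) << 1 wraps when both operands are -32768 (the product is 0x40000000 and the doubling overflows), so the patch goes through L_mult and L_add, which saturate. A simplified stand-in showing the hazard:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the saturating Q15 fractional multiply (L_mult-like).
     * -32768 * -32768 is the only 16x16 pair whose doubled product
     * exceeds int32 range, so it is the case that must be clamped. */
    static int32_t sketch_L_mult(int16_t a, int16_t b)
    {
        if (a == -32768 && b == -32768) return INT32_MAX;  /* saturate */
        return ((int32_t)a * b) << 1;
    }

    int main(void)
    {
        printf("%d\n", sketch_L_mult(-32768, -32768)); /* 2147483647 */
        printf("%d\n", sketch_L_mult(16384, 16384));   /* 536870912  */
        return 0;
    }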
 
 
 void Deemph_32(
-		Word16 x_hi[],                        /* (i)     : input signal (bit31..16) */
-		Word16 x_lo[],                        /* (i)     : input signal (bit15..4)  */
-		Word16 y[],                           /* (o)     : output signal (x16)      */
-		Word16 mu,                            /* (i) Q15 : deemphasis factor        */
-		Word16 L,                             /* (i)     : vector size              */
-		Word16 * mem                          /* (i/o)   : memory (y[-1])           */
-	      )
+        Word16 x_hi[],                        /* (i)     : input signal (bit31..16) */
+        Word16 x_lo[],                        /* (i)     : input signal (bit15..4)  */
+        Word16 y[],                           /* (o)     : output signal (x16)      */
+        Word16 mu,                            /* (i) Q15 : deemphasis factor        */
+        Word16 L,                             /* (i)     : vector size              */
+        Word16 * mem                          /* (i/o)   : memory (y[-1])           */
+          )
 {
-	Word16 fac;
-	Word32 i, L_tmp;
+    Word16 fac;
+    Word32 i, L_tmp;
 
-	fac = mu >> 1;                                /* Q15 --> Q14 */
+    fac = mu >> 1;                                /* Q15 --> Q14 */
 
-	L_tmp = L_deposit_h(x_hi[0]);
-	L_tmp += (x_lo[0] * 8)<<1;
-	L_tmp = (L_tmp << 3);
-	L_tmp += ((*mem) * fac)<<1;
-	L_tmp = (L_tmp << 1);
-	y[0] = (L_tmp + 0x8000)>>16;
+    L_tmp = L_deposit_h(x_hi[0]);
+    L_tmp += (x_lo[0] * 8)<<1;
+    L_tmp = (L_tmp << 3);
+    L_tmp += ((*mem) * fac)<<1;
+    L_tmp = (L_tmp << 1);
+    y[0] = (L_tmp + 0x8000)>>16;
 
-	for (i = 1; i < L; i++)
-	{
-		L_tmp = L_deposit_h(x_hi[i]);
-		L_tmp += (x_lo[i] * 8)<<1;
-		L_tmp = (L_tmp << 3);
-		L_tmp += (y[i - 1] * fac)<<1;
-		L_tmp = (L_tmp << 1);
-		y[i] = (L_tmp + 0x8000)>>16;
-	}
+    for (i = 1; i < L; i++)
+    {
+        L_tmp = L_deposit_h(x_hi[i]);
+        L_tmp += (x_lo[i] * 8)<<1;
+        L_tmp = (L_tmp << 3);
+        L_tmp += (y[i - 1] * fac)<<1;
+        L_tmp = (L_tmp << 1);
+        y[i] = (L_tmp + 0x8000)>>16;
+    }
 
-	*mem = y[L - 1];
+    *mem = y[L - 1];
 
-	return;
+    return;
 }
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/dtx.c b/media/libstagefright/codecs/amrwbenc/src/dtx.c
index 2cfaced0f..6be8683 100644
--- a/media/libstagefright/codecs/amrwbenc/src/dtx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/dtx.c
@@ -17,7 +17,7 @@
 /***********************************************************************
 *       File: dtx.c                                                    *
 *                                                                      *
-*	    Description:DTX functions                                  *
+*       Description:DTX functions                                      *
 *                                                                      *
 ************************************************************************/
 
@@ -35,33 +35,33 @@
 #include "mem_align.h"
 
 static void aver_isf_history(
-		Word16 isf_old[],
-		Word16 indices[],
-		Word32 isf_aver[]
-		);
+        Word16 isf_old[],
+        Word16 indices[],
+        Word32 isf_aver[]
+        );
 
 static void find_frame_indices(
-		Word16 isf_old_tx[],
-		Word16 indices[],
-		dtx_encState * st
-		);
+        Word16 isf_old_tx[],
+        Word16 indices[],
+        dtx_encState * st
+        );
 
 static Word16 dithering_control(
-		dtx_encState * st
-		);
+        dtx_encState * st
+        );
 
 /* excitation energy adjustment depending on speech coder mode used, Q7 */
 static Word16 en_adjust[9] =
 {
-	230,                                   /* mode0 = 7k  :  -5.4dB  */
-	179,                                   /* mode1 = 9k  :  -4.2dB  */
-	141,                                   /* mode2 = 12k :  -3.3dB  */
-	128,                                   /* mode3 = 14k :  -3.0dB  */
-	122,                                   /* mode4 = 16k :  -2.85dB */
-	115,                                   /* mode5 = 18k :  -2.7dB  */
-	115,                                   /* mode6 = 20k :  -2.7dB  */
-	115,                                   /* mode7 = 23k :  -2.7dB  */
-	115                                    /* mode8 = 24k :  -2.7dB  */
+    230,                                   /* mode0 = 7k  :  -5.4dB  */
+    179,                                   /* mode1 = 9k  :  -4.2dB  */
+    141,                                   /* mode2 = 12k :  -3.3dB  */
+    128,                                   /* mode3 = 14k :  -3.0dB  */
+    122,                                   /* mode4 = 16k :  -2.85dB */
+    115,                                   /* mode5 = 18k :  -2.7dB  */
+    115,                                   /* mode6 = 20k :  -2.7dB  */
+    115,                                   /* mode7 = 23k :  -2.7dB  */
+    115                                    /* mode8 = 24k :  -2.7dB  */
 };
 
 /**************************************************************************
@@ -71,24 +71,24 @@
 **************************************************************************/
 Word16 dtx_enc_init(dtx_encState ** st, Word16 isf_init[], VO_MEM_OPERATOR *pMemOP)
 {
-	dtx_encState *s;
+    dtx_encState *s;
 
-	if (st == (dtx_encState **) NULL)
-	{
-		fprintf(stderr, "dtx_enc_init: invalid parameter\n");
-		return -1;
-	}
-	*st = NULL;
+    if (st == (dtx_encState **) NULL)
+    {
+        fprintf(stderr, "dtx_enc_init: invalid parameter\n");
+        return -1;
+    }
+    *st = NULL;
 
-	/* allocate memory */
-	if ((s = (dtx_encState *)mem_malloc(pMemOP, sizeof(dtx_encState), 32, VO_INDEX_ENC_AMRWB)) == NULL)
-	{
-		fprintf(stderr, "dtx_enc_init: can not malloc state structure\n");
-		return -1;
-	}
-	dtx_enc_reset(s, isf_init);
-	*st = s;
-	return 0;
+    /* allocate memory */
+    if ((s = (dtx_encState *)mem_malloc(pMemOP, sizeof(dtx_encState), 32, VO_INDEX_ENC_AMRWB)) == NULL)
+    {
+        fprintf(stderr, "dtx_enc_init: can not malloc state structure\n");
+        return -1;
+    }
+    dtx_enc_reset(s, isf_init);
+    *st = s;
+    return 0;
 }
 
 /**************************************************************************
@@ -98,40 +98,40 @@
 **************************************************************************/
 Word16 dtx_enc_reset(dtx_encState * st, Word16 isf_init[])
 {
-	Word32 i;
+    Word32 i;
 
-	if (st == (dtx_encState *) NULL)
-	{
-		fprintf(stderr, "dtx_enc_reset: invalid parameter\n");
-		return -1;
-	}
-	st->hist_ptr = 0;
-	st->log_en_index = 0;
+    if (st == (dtx_encState *) NULL)
+    {
+        fprintf(stderr, "dtx_enc_reset: invalid parameter\n");
+        return -1;
+    }
+    st->hist_ptr = 0;
+    st->log_en_index = 0;
 
-	/* Init isf_hist[] */
-	for (i = 0; i < DTX_HIST_SIZE; i++)
-	{
-		Copy(isf_init, &st->isf_hist[i * M], M);
-	}
-	st->cng_seed = RANDOM_INITSEED;
+    /* Init isf_hist[] */
+    for (i = 0; i < DTX_HIST_SIZE; i++)
+    {
+        Copy(isf_init, &st->isf_hist[i * M], M);
+    }
+    st->cng_seed = RANDOM_INITSEED;
 
-	/* Reset energy history */
-	Set_zero(st->log_en_hist, DTX_HIST_SIZE);
+    /* Reset energy history */
+    Set_zero(st->log_en_hist, DTX_HIST_SIZE);
 
-	st->dtxHangoverCount = DTX_HANG_CONST;
-	st->decAnaElapsedCount = 32767;
+    st->dtxHangoverCount = DTX_HANG_CONST;
+    st->decAnaElapsedCount = 32767;
 
-	for (i = 0; i < 28; i++)
-	{
-		st->D[i] = 0;
-	}
+    for (i = 0; i < 28; i++)
+    {
+        st->D[i] = 0;
+    }
 
-	for (i = 0; i < DTX_HIST_SIZE - 1; i++)
-	{
-		st->sumD[i] = 0;
-	}
+    for (i = 0; i < DTX_HIST_SIZE - 1; i++)
+    {
+        st->sumD[i] = 0;
+    }
 
-	return 1;
+    return 1;
 }
 
 /**************************************************************************
@@ -141,12 +141,12 @@
 **************************************************************************/
 void dtx_enc_exit(dtx_encState ** st, VO_MEM_OPERATOR *pMemOP)
 {
-	if (st == NULL || *st == NULL)
-		return;
-	/* deallocate memory */
-	mem_free(pMemOP, *st, VO_INDEX_ENC_AMRWB);
-	*st = NULL;
-	return;
+    if (st == NULL || *st == NULL)
+        return;
+    /* deallocate memory */
+    mem_free(pMemOP, *st, VO_INDEX_ENC_AMRWB);
+    *st = NULL;
+    return;
 }
 
 
@@ -156,133 +156,133 @@
 *
 **************************************************************************/
 Word16 dtx_enc(
-		dtx_encState * st,                    /* i/o : State struct                                         */
-		Word16 isf[M],                        /* o   : CN ISF vector                                        */
-		Word16 * exc2,                        /* o   : CN excitation                                        */
-		Word16 ** prms
-	      )
+        dtx_encState * st,                    /* i/o : State struct                                         */
+        Word16 isf[M],                        /* o   : CN ISF vector                                        */
+        Word16 * exc2,                        /* o   : CN excitation                                        */
+        Word16 ** prms
+          )
 {
-	Word32 i, j;
-	Word16 indice[7];
-	Word16 log_en, gain, level, exp, exp0, tmp;
-	Word16 log_en_int_e, log_en_int_m;
-	Word32 L_isf[M], ener32, level32;
-	Word16 isf_order[3];
-	Word16 CN_dith;
+    Word32 i, j;
+    Word16 indice[7];
+    Word16 log_en, gain, level, exp, exp0, tmp;
+    Word16 log_en_int_e, log_en_int_m;
+    Word32 L_isf[M], ener32, level32;
+    Word16 isf_order[3];
+    Word16 CN_dith;
 
-	/* VOX mode computation of SID parameters */
-	log_en = 0;
-	for (i = 0; i < M; i++)
-	{
-		L_isf[i] = 0;
-	}
-	/* average energy and isf */
-	for (i = 0; i < DTX_HIST_SIZE; i++)
-	{
-		/* Division by DTX_HIST_SIZE = 8 has been done in dtx_buffer. log_en is in Q10 */
-		log_en = add(log_en, st->log_en_hist[i]);
+    /* VOX mode computation of SID parameters */
+    log_en = 0;
+    for (i = 0; i < M; i++)
+    {
+        L_isf[i] = 0;
+    }
+    /* average energy and isf */
+    for (i = 0; i < DTX_HIST_SIZE; i++)
+    {
+        /* Division by DTX_HIST_SIZE = 8 has been done in dtx_buffer. log_en is in Q10 */
+        log_en = add(log_en, st->log_en_hist[i]);
 
-	}
-	find_frame_indices(st->isf_hist, isf_order, st);
-	aver_isf_history(st->isf_hist, isf_order, L_isf);
+    }
+    find_frame_indices(st->isf_hist, isf_order, st);
+    aver_isf_history(st->isf_hist, isf_order, L_isf);
 
-	for (j = 0; j < M; j++)
-	{
-		isf[j] = (Word16)(L_isf[j] >> 3);  /* divide by 8 */
-	}
+    for (j = 0; j < M; j++)
+    {
+        isf[j] = (Word16)(L_isf[j] >> 3);  /* divide by 8 */
+    }
 
-	/* quantize logarithmic energy to 6 bits (-6 : 66 dB) which corresponds to -2:22 in log2(E).  */
-	/* st->log_en_index = (short)( (log_en + 2.0) * 2.625 ); */
+    /* quantize logarithmic energy to 6 bits (-6 : 66 dB) which corresponds to -2:22 in log2(E).  */
+    /* st->log_en_index = (short)( (log_en + 2.0) * 2.625 ); */
 
-	/* increase dynamics to 7 bits (Q8) */
-	log_en = (log_en >> 2);
+    /* increase dynamics to 7 bits (Q8) */
+    log_en = (log_en >> 2);
 
-	/* Add 2 in Q8 = 512 to get log2(E) between 0:24 */
-	log_en = add(log_en, 512);
+    /* Add 2 in Q8 = 512 to get log2(E) between 0:24 */
+    log_en = add(log_en, 512);
 
-	/* Multiply by 2.625 to get full 6 bit range. 2.625 = 21504 in Q13. The result is in Q6 */
-	log_en = mult(log_en, 21504);
+    /* Multiply by 2.625 to get full 6 bit range. 2.625 = 21504 in Q13. The result is in Q6 */
+    log_en = mult(log_en, 21504);
 
-	/* Quantize Energy */
-	st->log_en_index = shr(log_en, 6);
+    /* Quantize Energy */
+    st->log_en_index = shr(log_en, 6);
 
-	if(st->log_en_index > 63)
-	{
-		st->log_en_index = 63;
-	}
-	if (st->log_en_index < 0)
-	{
-		st->log_en_index = 0;
-	}
-	/* Quantize ISFs */
-	Qisf_ns(isf, isf, indice);
+    if(st->log_en_index > 63)
+    {
+        st->log_en_index = 63;
+    }
+    if (st->log_en_index < 0)
+    {
+        st->log_en_index = 0;
+    }
+    /* Quantize ISFs */
+    Qisf_ns(isf, isf, indice);
 
 
-	Parm_serial(indice[0], 6, prms);
-	Parm_serial(indice[1], 6, prms);
-	Parm_serial(indice[2], 6, prms);
-	Parm_serial(indice[3], 5, prms);
-	Parm_serial(indice[4], 5, prms);
+    Parm_serial(indice[0], 6, prms);
+    Parm_serial(indice[1], 6, prms);
+    Parm_serial(indice[2], 6, prms);
+    Parm_serial(indice[3], 5, prms);
+    Parm_serial(indice[4], 5, prms);
 
-	Parm_serial((st->log_en_index), 6, prms);
+    Parm_serial((st->log_en_index), 6, prms);
 
-	CN_dith = dithering_control(st);
-	Parm_serial(CN_dith, 1, prms);
+    CN_dith = dithering_control(st);
+    Parm_serial(CN_dith, 1, prms);
 
-	/* level = (float)( pow( 2.0f, (float)st->log_en_index / 2.625 - 2.0 ) );    */
-	/* log2(E) in Q9 (log2(E) lies in between -2:22) */
-	log_en = shl(st->log_en_index, 15 - 6);
+    /* level = (float)( pow( 2.0f, (float)st->log_en_index / 2.625 - 2.0 ) );    */
+    /* log2(E) in Q9 (log2(E) lies in between -2:22) */
+    log_en = shl(st->log_en_index, 15 - 6);
 
-	/* Divide by 2.625; log_en will be between 0:24  */
-	log_en = mult(log_en, 12483);
-	/* the result corresponds to log2(gain) in Q10 */
+    /* Divide by 2.625; log_en will be between 0:24  */
+    log_en = mult(log_en, 12483);
+    /* the result corresponds to log2(gain) in Q10 */
 
-	/* Find integer part  */
-	log_en_int_e = (log_en >> 10);
+    /* Find integer part  */
+    log_en_int_e = (log_en >> 10);
 
-	/* Find fractional part */
-	log_en_int_m = (Word16) (log_en & 0x3ff);
-	log_en_int_m = shl(log_en_int_m, 5);
+    /* Find fractional part */
+    log_en_int_m = (Word16) (log_en & 0x3ff);
+    log_en_int_m = shl(log_en_int_m, 5);
 
-	/* Subtract 2 from log_en in Q9, i.e divide the gain by 2 (energy by 4) */
-	/* Add 16 in order to have the result of pow2 in Q16 */
-	log_en_int_e = add(log_en_int_e, 16 - 1);
+    /* Subtract 2 from log_en in Q9, i.e divide the gain by 2 (energy by 4) */
+    /* Add 16 in order to have the result of pow2 in Q16 */
+    log_en_int_e = add(log_en_int_e, 16 - 1);
 
-	level32 = Pow2(log_en_int_e, log_en_int_m); /* Q16 */
-	exp0 = norm_l(level32);
-	level32 = (level32 << exp0);        /* level in Q31 */
-	exp0 = (15 - exp0);
-	level = extract_h(level32);            /* level in Q15 */
+    level32 = Pow2(log_en_int_e, log_en_int_m); /* Q16 */
+    exp0 = norm_l(level32);
+    level32 = (level32 << exp0);        /* level in Q31 */
+    exp0 = (15 - exp0);
+    level = extract_h(level32);            /* level in Q15 */
 
-	/* generate white noise vector */
-	for (i = 0; i < L_FRAME; i++)
-	{
-		exc2[i] = (Random(&(st->cng_seed)) >> 4);
-	}
+    /* generate white noise vector */
+    for (i = 0; i < L_FRAME; i++)
+    {
+        exc2[i] = (Random(&(st->cng_seed)) >> 4);
+    }
 
-	/* gain = level / sqrt(ener) * sqrt(L_FRAME) */
+    /* gain = level / sqrt(ener) * sqrt(L_FRAME) */
 
-	/* energy of generated excitation */
-	ener32 = Dot_product12(exc2, exc2, L_FRAME, &exp);
+    /* energy of generated excitation */
+    ener32 = Dot_product12(exc2, exc2, L_FRAME, &exp);
 
-	Isqrt_n(&ener32, &exp);
+    Isqrt_n(&ener32, &exp);
 
-	gain = extract_h(ener32);
+    gain = extract_h(ener32);
 
-	gain = mult(level, gain);              /* gain in Q15 */
+    gain = mult(level, gain);              /* gain in Q15 */
 
-	exp = add(exp0, exp);
+    exp = add(exp0, exp);
 
-	/* Multiply by sqrt(L_FRAME)=16, i.e. shift left by 4 */
-	exp += 4;
+    /* Multiply by sqrt(L_FRAME)=16, i.e. shift left by 4 */
+    exp += 4;
 
-	for (i = 0; i < L_FRAME; i++)
-	{
-		tmp = mult(exc2[i], gain);         /* Q0 * Q15 */
-		exc2[i] = shl(tmp, exp);
-	}
+    for (i = 0; i < L_FRAME; i++)
+    {
+        tmp = mult(exc2[i], gain);         /* Q0 * Q15 */
+        exc2[i] = shl(tmp, exp);
+    }
 
-	return 0;
+    return 0;
 }
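
The fixed-point constants in this function are worth decoding: 21504 is 2.625 in Q13 (the gain that maps log2(E) in [-2, 22] onto the 6-bit index 0..63) and 12483 is 1/2.625 in Q15 for the inverse mapping. A quick arithmetic check:

    #include <stdio.h>

    /* Sanity check of the quantizer constants used above. */
    int main(void)
    {
        printf("2.625 * 8192   = %.1f (code uses 21504)\n", 2.625 * 8192);
        printf("32768 / 2.625  = %.1f (code uses 12483)\n", 32768 / 2.625);
        /* full range: log2(E) = 22 plus the +2 offset hits index 63 */
        printf("(22 + 2) * 2.625 = %.1f\n", (22 + 2) * 2.625);
        return 0;
    }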
 
 /**************************************************************************
@@ -291,45 +291,45 @@
 *
 **************************************************************************/
 Word16 dtx_buffer(
-		dtx_encState * st,                    /* i/o : State struct                    */
-		Word16 isf_new[],                     /* i   : isf vector                      */
-		Word32 enr,                           /* i   : residual energy (in L_FRAME)    */
-		Word16 codec_mode
-		)
+        dtx_encState * st,                    /* i/o : State struct                    */
+        Word16 isf_new[],                     /* i   : isf vector                      */
+        Word32 enr,                           /* i   : residual energy (in L_FRAME)    */
+        Word16 codec_mode
+        )
 {
-	Word16 log_en;
+    Word16 log_en;
 
-	Word16 log_en_e;
-	Word16 log_en_m;
-	st->hist_ptr = add(st->hist_ptr, 1);
-	if(st->hist_ptr == DTX_HIST_SIZE)
-	{
-		st->hist_ptr = 0;
-	}
-	/* copy lsp vector into buffer */
-	Copy(isf_new, &st->isf_hist[st->hist_ptr * M], M);
+    Word16 log_en_e;
+    Word16 log_en_m;
+    st->hist_ptr = add(st->hist_ptr, 1);
+    if(st->hist_ptr == DTX_HIST_SIZE)
+    {
+        st->hist_ptr = 0;
+    }
+    /* copy lsp vector into buffer */
+    Copy(isf_new, &st->isf_hist[st->hist_ptr * M], M);
 
-	/* log_en = (float)log10(enr*0.0059322)/(float)log10(2.0f);  */
-	Log2(enr, &log_en_e, &log_en_m);
+    /* log_en = (float)log10(enr*0.0059322)/(float)log10(2.0f);  */
+    Log2(enr, &log_en_e, &log_en_m);
 
-	/* convert exponent and mantissa to Word16 Q7. Q7 is used to simplify averaging in dtx_enc */
-	log_en = shl(log_en_e, 7);             /* Q7 */
-	log_en = add(log_en, shr(log_en_m, 15 - 7));
+    /* convert exponent and mantissa to Word16 Q7. Q7 is used to simplify averaging in dtx_enc */
+    log_en = shl(log_en_e, 7);             /* Q7 */
+    log_en = add(log_en, shr(log_en_m, 15 - 7));
 
-	/* Find energy per sample by multiplying with 0.0059322, i.e subtract log2(1/0.0059322) = 7.39722 The
-	 * constant 0.0059322 takes into account windowings and analysis length from autocorrelation
-	 * computations; 7.39722 in Q7 = 947  */
-	/* Subtract 3 dB = 0.99658 in log2(E) = 127 in Q7. */
-	/* log_en = sub( log_en, 947 + en_adjust[codec_mode] ); */
+    /* Find energy per sample by multiplying with 0.0059322, i.e. subtract log2(1/0.0059322) = 7.39722. The
+     * constant 0.0059322 takes into account windowings and analysis length from autocorrelation
+     * computations; 7.39722 in Q7 = 947  */
+    /* Subtract 3 dB = 0.99658 in log2(E) = 127 in Q7. */
+    /* log_en = sub( log_en, 947 + en_adjust[codec_mode] ); */
 
-	/* Find energy per sample (divide by L_FRAME=256), i.e subtract log2(256) = 8.0  (1024 in Q7) */
-	/* Subtract 3 dB = 0.99658 in log2(E) = 127 in Q7. */
+    /* Find energy per sample (divide by L_FRAME=256), i.e. subtract log2(256) = 8.0  (1024 in Q7) */
+    /* Subtract 3 dB = 0.99658 in log2(E) = 127 in Q7. */
 
-	log_en = sub(log_en, add(1024, en_adjust[codec_mode]));
+    log_en = sub(log_en, add(1024, en_adjust[codec_mode]));
 
-	/* Insert into the buffer */
-	st->log_en_hist[st->hist_ptr] = log_en;
-	return 0;
+    /* Insert into the buffer */
+    st->log_en_hist[st->hist_ptr] = log_en;
+    return 0;
 }
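
dtx_buffer keeps log2(frame energy) in Q7: the Log2() exponent is shifted up by 7, the Q15 mantissa down by 8, and then log2(L_FRAME) = 8 (1024 in Q7) is subtracted to get energy per sample. A worked example, assuming Log2() returns log2(x) = e + m/2^15 and ignoring the en_adjust term:

    #include <stdio.h>

    int main(void)
    {
        int e = 20, m = 16384;                 /* log2(enr) = 20.5, say   */
        int log_en = (e << 7) + (m >> 8);      /* 20.5 in Q7 = 2624       */
        log_en -= 1024;                        /* per sample: minus 8.0   */
        printf("log_en = %d (Q7) = %.2f\n", log_en, log_en / 128.0);
        return 0;
    }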
 
 /**************************************************************************
@@ -339,267 +339,267 @@
 *                                            the decoding side.
 **************************************************************************/
 void tx_dtx_handler(dtx_encState * st,     /* i/o : State struct           */
-		Word16 vad_flag,                      /* i   : vad decision           */
-		Word16 * usedMode                     /* i/o : mode changed or not    */
-		)
+        Word16 vad_flag,                      /* i   : vad decision           */
+        Word16 * usedMode                     /* i/o : mode changed or not    */
+        )
 {
 
-	/* this state machine is in synch with the GSMEFR txDtx machine      */
-	st->decAnaElapsedCount = add(st->decAnaElapsedCount, 1);
+    /* this state machine is in synch with the GSMEFR txDtx machine      */
+    st->decAnaElapsedCount = add(st->decAnaElapsedCount, 1);
 
-	if (vad_flag != 0)
-	{
-		st->dtxHangoverCount = DTX_HANG_CONST;
-	} else
-	{                                      /* non-speech */
-		if (st->dtxHangoverCount == 0)
-		{                                  /* out of decoder analysis hangover  */
-			st->decAnaElapsedCount = 0;
-			*usedMode = MRDTX;
-		} else
-		{                                  /* in possible analysis hangover */
-			st->dtxHangoverCount = sub(st->dtxHangoverCount, 1);
+    if (vad_flag != 0)
+    {
+        st->dtxHangoverCount = DTX_HANG_CONST;
+    } else
+    {                                      /* non-speech */
+        if (st->dtxHangoverCount == 0)
+        {                                  /* out of decoder analysis hangover  */
+            st->decAnaElapsedCount = 0;
+            *usedMode = MRDTX;
+        } else
+        {                                  /* in possible analysis hangover */
+            st->dtxHangoverCount = sub(st->dtxHangoverCount, 1);
 
-			/* decAnaElapsedCount + dtxHangoverCount < DTX_ELAPSED_FRAMES_THRESH */
-			if (sub(add(st->decAnaElapsedCount, st->dtxHangoverCount),
-						DTX_ELAPSED_FRAMES_THRESH) < 0)
-			{
-				*usedMode = MRDTX;
-				/* if short time since decoder update, do not add extra HO */
-			}
-			/* else override VAD and stay in speech mode *usedMode and add extra hangover */
-		}
-	}
+            /* decAnaElapsedCount + dtxHangoverCount < DTX_ELAPSED_FRAMES_THRESH */
+            if (sub(add(st->decAnaElapsedCount, st->dtxHangoverCount),
+                        DTX_ELAPSED_FRAMES_THRESH) < 0)
+            {
+                *usedMode = MRDTX;
+                /* if short time since decoder update, do not add extra HO */
+            }
+            /* else override VAD and stay in speech mode *usedMode and add extra hangover */
+        }
+    }
 
-	return;
+    return;
 }
 
 
 
 static void aver_isf_history(
-		Word16 isf_old[],
-		Word16 indices[],
-		Word32 isf_aver[]
-		)
+        Word16 isf_old[],
+        Word16 indices[],
+        Word32 isf_aver[]
+        )
 {
-	Word32 i, j, k;
-	Word16 isf_tmp[2 * M];
-	Word32 L_tmp;
+    Word32 i, j, k;
+    Word16 isf_tmp[2 * M];
+    Word32 L_tmp;
 
-	/* Memorize in isf_tmp[][] the ISF vectors to be replaced by */
-	/* the median ISF vector prior to the averaging               */
-	for (k = 0; k < 2; k++)
-	{
-		if ((indices[k] + 1) != 0)
-		{
-			for (i = 0; i < M; i++)
-			{
-				isf_tmp[k * M + i] = isf_old[indices[k] * M + i];
-				isf_old[indices[k] * M + i] = isf_old[indices[2] * M + i];
-			}
-		}
-	}
+    /* Memorize in isf_tmp[][] the ISF vectors to be replaced by */
+    /* the median ISF vector prior to the averaging               */
+    for (k = 0; k < 2; k++)
+    {
+        if ((indices[k] + 1) != 0)
+        {
+            for (i = 0; i < M; i++)
+            {
+                isf_tmp[k * M + i] = isf_old[indices[k] * M + i];
+                isf_old[indices[k] * M + i] = isf_old[indices[2] * M + i];
+            }
+        }
+    }
 
-	/* Perform the ISF averaging */
-	for (j = 0; j < M; j++)
-	{
-		L_tmp = 0;
+    /* Perform the ISF averaging */
+    for (j = 0; j < M; j++)
+    {
+        L_tmp = 0;
 
-		for (i = 0; i < DTX_HIST_SIZE; i++)
-		{
-			L_tmp = L_add(L_tmp, L_deposit_l(isf_old[i * M + j]));
-		}
-		isf_aver[j] = L_tmp;
-	}
+        for (i = 0; i < DTX_HIST_SIZE; i++)
+        {
+            L_tmp = L_add(L_tmp, L_deposit_l(isf_old[i * M + j]));
+        }
+        isf_aver[j] = L_tmp;
+    }
 
-	/* Retrieve from isf_tmp[][] the ISF vectors saved prior to averaging */
-	for (k = 0; k < 2; k++)
-	{
-		if ((indices[k] + 1) != 0)
-		{
-			for (i = 0; i < M; i++)
-			{
-				isf_old[indices[k] * M + i] = isf_tmp[k * M + i];
-			}
-		}
-	}
+    /* Retrieve from isf_tmp[][] the ISF vectors saved prior to averaging */
+    for (k = 0; k < 2; k++)
+    {
+        if ((indices[k] + 1) != 0)
+        {
+            for (i = 0; i < M; i++)
+            {
+                isf_old[indices[k] * M + i] = isf_tmp[k * M + i];
+            }
+        }
+    }
 
-	return;
+    return;
 }
 
 static void find_frame_indices(
-		Word16 isf_old_tx[],
-		Word16 indices[],
-		dtx_encState * st
-		)
+        Word16 isf_old_tx[],
+        Word16 indices[],
+        dtx_encState * st
+        )
 {
-	Word32 L_tmp, summin, summax, summax2nd;
-	Word16 i, j, tmp;
-	Word16 ptr;
+    Word32 L_tmp, summin, summax, summax2nd;
+    Word16 i, j, tmp;
+    Word16 ptr;
 
-	/* Remove the effect of the oldest frame from the column */
-	/* sum sumD[0..DTX_HIST_SIZE-1]. sumD[DTX_HIST_SIZE] is    */
-	/* not updated since it will be removed later.           */
+    /* Remove the effect of the oldest frame from the column */
+    /* sum sumD[0..DTX_HIST_SIZE-1]. sumD[DTX_HIST_SIZE] is    */
+    /* not updated since it will be removed later.           */
 
-	tmp = DTX_HIST_SIZE_MIN_ONE;
-	j = -1;
-	for (i = 0; i < DTX_HIST_SIZE_MIN_ONE; i++)
-	{
-		j = add(j, tmp);
-		st->sumD[i] = L_sub(st->sumD[i], st->D[j]);
-		tmp = sub(tmp, 1);
-	}
+    tmp = DTX_HIST_SIZE_MIN_ONE;
+    j = -1;
+    for (i = 0; i < DTX_HIST_SIZE_MIN_ONE; i++)
+    {
+        j = add(j, tmp);
+        st->sumD[i] = L_sub(st->sumD[i], st->D[j]);
+        tmp = sub(tmp, 1);
+    }
 
-	/* Shift the column sum sumD. The element sumD[DTX_HIST_SIZE-1]    */
-	/* corresponding to the oldest frame is removed. The sum of     */
-	/* the distances between the latest isf and other isfs, */
-	/* i.e. the element sumD[0], will be computed during this call. */
-	/* Hence this element is initialized to zero.                   */
+    /* Shift the column sum sumD. The element sumD[DTX_HIST_SIZE-1]    */
+    /* corresponding to the oldest frame is removed. The sum of     */
+    /* the distances between the latest isf and other isfs, */
+    /* i.e. the element sumD[0], will be computed during this call. */
+    /* Hence this element is initialized to zero.                   */
 
-	for (i = DTX_HIST_SIZE_MIN_ONE; i > 0; i--)
-	{
-		st->sumD[i] = st->sumD[i - 1];
-	}
-	st->sumD[0] = 0;
+    for (i = DTX_HIST_SIZE_MIN_ONE; i > 0; i--)
+    {
+        st->sumD[i] = st->sumD[i - 1];
+    }
+    st->sumD[0] = 0;
 
-	/* Remove the oldest frame from the distance matrix.           */
-	/* Note that the distance matrix is replaced by a one-         */
-	/* dimensional array to save static memory.                    */
+    /* Remove the oldest frame from the distance matrix.           */
+    /* Note that the distance matrix is replaced by a one-         */
+    /* dimensional array to save static memory.                    */
 
-	tmp = 0;
-	for (i = 27; i >= 12; i = (Word16) (i - tmp))
-	{
-		tmp = add(tmp, 1);
-		for (j = tmp; j > 0; j--)
-		{
-			st->D[i - j + 1] = st->D[i - j - tmp];
-		}
-	}
+    tmp = 0;
+    for (i = 27; i >= 12; i = (Word16) (i - tmp))
+    {
+        tmp = add(tmp, 1);
+        for (j = tmp; j > 0; j--)
+        {
+            st->D[i - j + 1] = st->D[i - j - tmp];
+        }
+    }
 
-	/* Compute the first column of the distance matrix D            */
-	/* (squared Euclidean distances from isf1[] to isf_old_tx[][]). */
+    /* Compute the first column of the distance matrix D            */
+    /* (squared Euclidean distances from isf1[] to isf_old_tx[][]). */
 
-	ptr = st->hist_ptr;
-	for (i = 1; i < DTX_HIST_SIZE; i++)
-	{
-		/* Compute the distance between the latest isf and the other isfs. */
-		ptr = sub(ptr, 1);
-		if (ptr < 0)
-		{
-			ptr = DTX_HIST_SIZE_MIN_ONE;
-		}
-		L_tmp = 0;
-		for (j = 0; j < M; j++)
-		{
-			tmp = sub(isf_old_tx[st->hist_ptr * M + j], isf_old_tx[ptr * M + j]);
-			L_tmp = L_mac(L_tmp, tmp, tmp);
-		}
-		st->D[i - 1] = L_tmp;
+    ptr = st->hist_ptr;
+    for (i = 1; i < DTX_HIST_SIZE; i++)
+    {
+        /* Compute the distance between the latest isf and the other isfs. */
+        ptr = sub(ptr, 1);
+        if (ptr < 0)
+        {
+            ptr = DTX_HIST_SIZE_MIN_ONE;
+        }
+        L_tmp = 0;
+        for (j = 0; j < M; j++)
+        {
+            tmp = sub(isf_old_tx[st->hist_ptr * M + j], isf_old_tx[ptr * M + j]);
+            L_tmp = L_mac(L_tmp, tmp, tmp);
+        }
+        st->D[i - 1] = L_tmp;
 
-		/* Update also the column sums. */
-		st->sumD[0] = L_add(st->sumD[0], st->D[i - 1]);
-		st->sumD[i] = L_add(st->sumD[i], st->D[i - 1]);
-	}
+        /* Update also the column sums. */
+        st->sumD[0] = L_add(st->sumD[0], st->D[i - 1]);
+        st->sumD[i] = L_add(st->sumD[i], st->D[i - 1]);
+    }
 
-	/* Find the minimum and maximum distances */
-	summax = st->sumD[0];
-	summin = st->sumD[0];
-	indices[0] = 0;
-	indices[2] = 0;
-	for (i = 1; i < DTX_HIST_SIZE; i++)
-	{
-		if (L_sub(st->sumD[i], summax) > 0)
-		{
-			indices[0] = i;
-			summax = st->sumD[i];
-		}
-		if (L_sub(st->sumD[i], summin) < 0)
-		{
-			indices[2] = i;
-			summin = st->sumD[i];
-		}
-	}
+    /* Find the minimum and maximum distances */
+    summax = st->sumD[0];
+    summin = st->sumD[0];
+    indices[0] = 0;
+    indices[2] = 0;
+    for (i = 1; i < DTX_HIST_SIZE; i++)
+    {
+        if (L_sub(st->sumD[i], summax) > 0)
+        {
+            indices[0] = i;
+            summax = st->sumD[i];
+        }
+        if (L_sub(st->sumD[i], summin) < 0)
+        {
+            indices[2] = i;
+            summin = st->sumD[i];
+        }
+    }
 
-	/* Find the second largest distance */
-	summax2nd = -2147483647L;
-	indices[1] = -1;
-	for (i = 0; i < DTX_HIST_SIZE; i++)
-	{
-		if ((L_sub(st->sumD[i], summax2nd) > 0) && (sub(i, indices[0]) != 0))
-		{
-			indices[1] = i;
-			summax2nd = st->sumD[i];
-		}
-	}
+    /* Find the second largest distance */
+    summax2nd = -2147483647L;
+    indices[1] = -1;
+    for (i = 0; i < DTX_HIST_SIZE; i++)
+    {
+        if ((L_sub(st->sumD[i], summax2nd) > 0) && (sub(i, indices[0]) != 0))
+        {
+            indices[1] = i;
+            summax2nd = st->sumD[i];
+        }
+    }
 
-	for (i = 0; i < 3; i++)
-	{
-		indices[i] = sub(st->hist_ptr, indices[i]);
-		if (indices[i] < 0)
-		{
-			indices[i] = add(indices[i], DTX_HIST_SIZE);
-		}
-	}
+    for (i = 0; i < 3; i++)
+    {
+        indices[i] = sub(st->hist_ptr, indices[i]);
+        if (indices[i] < 0)
+        {
+            indices[i] = add(indices[i], DTX_HIST_SIZE);
+        }
+    }
 
-	/* If maximum distance/MED_THRESH is smaller than minimum distance */
-	/* then the median ISF vector replacement is not performed         */
-	tmp = norm_l(summax);
-	summax = (summax << tmp);
-	summin = (summin << tmp);
-	L_tmp = L_mult(voround(summax), INV_MED_THRESH);
-	if(L_tmp <= summin)
-	{
-		indices[0] = -1;
-	}
-	/* If second largest distance/MED_THRESH is smaller than     */
-	/* minimum distance then the median ISF vector replacement is    */
-	/* not performed                                                 */
-	summax2nd = L_shl(summax2nd, tmp);
-	L_tmp = L_mult(voround(summax2nd), INV_MED_THRESH);
-	if(L_tmp <= summin)
-	{
-		indices[1] = -1;
-	}
-	return;
+    /* If maximum distance/MED_THRESH is smaller than minimum distance */
+    /* then the median ISF vector replacement is not performed         */
+    tmp = norm_l(summax);
+    summax = (summax << tmp);
+    summin = (summin << tmp);
+    L_tmp = L_mult(voround(summax), INV_MED_THRESH);
+    if(L_tmp <= summin)
+    {
+        indices[0] = -1;
+    }
+    /* If second largest distance/MED_THRESH is smaller than     */
+    /* minimum distance then the median ISF vector replacement is    */
+    /* not performed                                                 */
+    summax2nd = L_shl(summax2nd, tmp);
+    L_tmp = L_mult(voround(summax2nd), INV_MED_THRESH);
+    if(L_tmp <= summin)
+    {
+        indices[1] = -1;
+    }
+    return;
 }
 
 static Word16 dithering_control(
-		dtx_encState * st
-		)
+        dtx_encState * st
+        )
 {
-	Word16 tmp, mean, CN_dith, gain_diff;
-	Word32 i, ISF_diff;
+    Word16 tmp, mean, CN_dith, gain_diff;
+    Word32 i, ISF_diff;
 
-	/* determine how stationary the spectrum of background noise is */
-	ISF_diff = 0;
-	for (i = 0; i < 8; i++)
-	{
-		ISF_diff = L_add(ISF_diff, st->sumD[i]);
-	}
-	if ((ISF_diff >> 26) > 0)
-	{
-		CN_dith = 1;
-	} else
-	{
-		CN_dith = 0;
-	}
+    /* determine how stationary the spectrum of background noise is */
+    ISF_diff = 0;
+    for (i = 0; i < 8; i++)
+    {
+        ISF_diff = L_add(ISF_diff, st->sumD[i]);
+    }
+    if ((ISF_diff >> 26) > 0)
+    {
+        CN_dith = 1;
+    } else
+    {
+        CN_dith = 0;
+    }
 
-	/* determine how stationary the energy of background noise is */
-	mean = 0;
-	for (i = 0; i < DTX_HIST_SIZE; i++)
-	{
-		mean = add(mean, st->log_en_hist[i]);
-	}
-	mean = (mean >> 3);
-	gain_diff = 0;
-	for (i = 0; i < DTX_HIST_SIZE; i++)
-	{
-		tmp = abs_s(sub(st->log_en_hist[i], mean));
-		gain_diff = add(gain_diff, tmp);
-	}
-	if (gain_diff > GAIN_THR)
-	{
-		CN_dith = 1;
-	}
-	return CN_dith;
+    /* determine how stationary the energy of background noise is */
+    mean = 0;
+    for (i = 0; i < DTX_HIST_SIZE; i++)
+    {
+        mean = add(mean, st->log_en_hist[i]);
+    }
+    mean = (mean >> 3);
+    gain_diff = 0;
+    for (i = 0; i < DTX_HIST_SIZE; i++)
+    {
+        tmp = abs_s(sub(st->log_en_hist[i], mean));
+        gain_diff = add(gain_diff, tmp);
+    }
+    if (gain_diff > GAIN_THR)
+    {
+        CN_dith = 1;
+    }
+    return CN_dith;
 }
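
The Q7 bookkeeping in dtx_buffer() above is easier to follow in floating point: Log2() returns log2(enr) as an exponent/mantissa pair, the pair is packed into Q7 (value * 128), and log2(L_FRAME) = log2(256) = 8.0 (1024 in Q7) plus the mode-dependent en_adjust term is subtracted to get a per-sample log energy. A minimal sketch of the same arithmetic; the helper name is mine and en_adjust is deliberately omitted:

#include <math.h>
#include <stdio.h>

/* Per-sample log2 energy, mirroring dtx_buffer()'s Q7 computation.
 * en_adjust[codec_mode] from the real tables is left out here. */
static int log_energy_q7(double enr)
{
    double log_en = log2(enr) - 8.0;   /* divide by L_FRAME = 256 */
    return (int)(log_en * 128.0);      /* Q7: 1.0 -> 128          */
}

int main(void)
{
    /* 2^20 -> (20 - 8) * 128 = 1536 */
    printf("%d\n", log_energy_q7(1048576.0));
    return 0;
}
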
diff --git a/media/libstagefright/codecs/amrwbenc/src/g_pitch.c b/media/libstagefright/codecs/amrwbenc/src/g_pitch.c
index d681f2e..98ee87e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/g_pitch.c
+++ b/media/libstagefright/codecs/amrwbenc/src/g_pitch.c
@@ -17,9 +17,9 @@
 /***********************************************************************
 *      File: g_pitch.c                                                 *
 *                                                                      *
-*	   Description:Compute the gain of pitch. Result in Q12        *
-*	               if(gain < 0) gain = 0                           *
-*				   if(gain > 1.2) gain = 1.2           *
+*      Description: Compute the gain of pitch. Result in Q14       *
+*                   if (gain < 0)   gain = 0                       *
+*                   if (gain > 1.2) gain = 1.2                     *
 ************************************************************************/
 
 #include "typedef.h"
@@ -27,52 +27,52 @@
 #include "math_op.h"
 
 Word16 G_pitch(                            /* (o) Q14 : Gain of pitch lag saturated to 1.2   */
-		Word16 xn[],                          /* (i)     : Pitch target.                        */
-		Word16 y1[],                          /* (i)     : filtered adaptive codebook.          */
-		Word16 g_coeff[],                     /* : Correlations need for gain quantization.     */
-		Word16 L_subfr                        /* : Length of subframe.                          */
-	      )
+        Word16 xn[],                          /* (i)     : Pitch target.                        */
+        Word16 y1[],                          /* (i)     : filtered adaptive codebook.          */
+        Word16 g_coeff[],                     /* : Correlations needed for gain quantization.   */
+        Word16 L_subfr                        /* : Length of subframe.                          */
+        )
 {
-	Word32 i;
-	Word16 xy, yy, exp_xy, exp_yy, gain;
-	/* Compute scalar product <y1[],y1[]> */
+    Word32 i;
+    Word16 xy, yy, exp_xy, exp_yy, gain;
+    /* Compute scalar product <y1[],y1[]> */
 #ifdef ASM_OPT                  /* asm optimization branch */
-	/* Compute scalar product <xn[],y1[]> */
-	xy = extract_h(Dot_product12_asm(xn, y1, L_subfr, &exp_xy));
-	yy = extract_h(Dot_product12_asm(y1, y1, L_subfr, &exp_yy));
+    /* Compute scalar product <xn[],y1[]> */
+    xy = extract_h(Dot_product12_asm(xn, y1, L_subfr, &exp_xy));
+    yy = extract_h(Dot_product12_asm(y1, y1, L_subfr, &exp_yy));
 
 #else
-	/* Compute scalar product <xn[],y1[]> */
-	xy = extract_h(Dot_product12(xn, y1, L_subfr, &exp_xy));
-	yy = extract_h(Dot_product12(y1, y1, L_subfr, &exp_yy));
+    /* Compute scalar product <xn[],y1[]> */
+    xy = extract_h(Dot_product12(xn, y1, L_subfr, &exp_xy));
+    yy = extract_h(Dot_product12(y1, y1, L_subfr, &exp_yy));
 
 #endif
 
-	g_coeff[0] = yy;
-	g_coeff[1] = exp_yy;
-	g_coeff[2] = xy;
-	g_coeff[3] = exp_xy;
+    g_coeff[0] = yy;
+    g_coeff[1] = exp_yy;
+    g_coeff[2] = xy;
+    g_coeff[3] = exp_xy;
 
-	/* If (xy < 0) gain = 0 */
-	if (xy < 0)
-		return ((Word16) 0);
+    /* If (xy < 0) gain = 0 */
+    if (xy < 0)
+        return ((Word16) 0);
 
-	/* compute gain = xy/yy */
+    /* compute gain = xy/yy */
 
-	xy >>= 1;                       /* Be sure xy < yy */
-	gain = div_s(xy, yy);
+    xy >>= 1;                       /* Be sure xy < yy */
+    gain = div_s(xy, yy);
 
-	i = exp_xy;
-	i -= exp_yy;
+    i = exp_xy;
+    i -= exp_yy;
 
-	gain = shl(gain, i);
+    gain = shl(gain, i);
 
-	/* if (gain > 1.2) gain = 1.2  in Q14 */
-	if(gain > 19661)
-	{
-		gain = 19661;
-	}
-	return (gain);
+    /* if (gain > 1.2) gain = 1.2  in Q14 */
+    if(gain > 19661)
+    {
+        gain = 19661;
+    }
+    return (gain);
 }
 
 
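In floating-point terms, G_pitch() above computes gain = <xn,y1> / <y1,y1> clamped to [0, 1.2]; the Q14 constant 19661 is round(1.2 * 16384), and the xy >>= 1 before div_s() only keeps the Q15 division legal (xy < yy). A hedged reference version, with illustrative names:

#include <stddef.h>

static float pitch_gain_ref(const float *xn, const float *y1, size_t n)
{
    float xy = 0.0f, yy = 0.0f;
    for (size_t i = 0; i < n; i++) {
        xy += xn[i] * y1[i];     /* <xn,y1>: target vs. filtered codebook */
        yy += y1[i] * y1[i];     /* <y1,y1>: filtered codebook energy     */
    }
    if (xy < 0.0f || yy <= 0.0f)
        return 0.0f;             /* negative correlation: gain forced to 0 */
    float gain = xy / yy;
    return gain > 1.2f ? 1.2f : gain;
}
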
diff --git a/media/libstagefright/codecs/amrwbenc/src/gpclip.c b/media/libstagefright/codecs/amrwbenc/src/gpclip.c
index 800b3f9..4ce3daa 100644
--- a/media/libstagefright/codecs/amrwbenc/src/gpclip.c
+++ b/media/libstagefright/codecs/amrwbenc/src/gpclip.c
@@ -35,75 +35,75 @@
 
 
 void Init_gp_clip(
-		Word16 mem[]                          /* (o) : memory of gain of pitch clipping algorithm */
-		)
+        Word16 mem[]                          /* (o) : memory of gain of pitch clipping algorithm */
+        )
 {
-	mem[0] = DIST_ISF_MAX;
-	mem[1] = GAIN_PIT_MIN;
+    mem[0] = DIST_ISF_MAX;
+    mem[1] = GAIN_PIT_MIN;
 }
 
 
 Word16 Gp_clip(
-		Word16 mem[]                          /* (i/o) : memory of gain of pitch clipping algorithm */
-	      )
+        Word16 mem[]                          /* (i/o) : memory of gain of pitch clipping algorithm */
+        )
 {
-	Word16 clip = 0;
-	if ((mem[0] < DIST_ISF_THRES) && (mem[1] > GAIN_PIT_THRES))
-		clip = 1;
+    Word16 clip = 0;
+    if ((mem[0] < DIST_ISF_THRES) && (mem[1] > GAIN_PIT_THRES))
+        clip = 1;
 
-	return (clip);
+    return (clip);
 }
 
 
 void Gp_clip_test_isf(
-		Word16 isf[],                         /* (i)   : isf values (in frequency domain)           */
-		Word16 mem[]                          /* (i/o) : memory of gain of pitch clipping algorithm */
-		)
+        Word16 isf[],                         /* (i)   : isf values (in frequency domain)           */
+        Word16 mem[]                          /* (i/o) : memory of gain of pitch clipping algorithm */
+        )
 {
-	Word16 dist, dist_min;
-	Word32 i;
+    Word16 dist, dist_min;
+    Word32 i;
 
-	dist_min = vo_sub(isf[1], isf[0]);
+    dist_min = vo_sub(isf[1], isf[0]);
 
-	for (i = 2; i < M - 1; i++)
-	{
-		dist = vo_sub(isf[i], isf[i - 1]);
-		if(dist < dist_min)
-		{
-			dist_min = dist;
-		}
-	}
+    for (i = 2; i < M - 1; i++)
+    {
+        dist = vo_sub(isf[i], isf[i - 1]);
+        if(dist < dist_min)
+        {
+            dist_min = dist;
+        }
+    }
 
-	dist = extract_h(L_mac(vo_L_mult(26214, mem[0]), 6554, dist_min));
+    dist = extract_h(L_mac(vo_L_mult(26214, mem[0]), 6554, dist_min));
 
-	if (dist > DIST_ISF_MAX)
-	{
-		dist = DIST_ISF_MAX;
-	}
-	mem[0] = dist;
+    if (dist > DIST_ISF_MAX)
+    {
+        dist = DIST_ISF_MAX;
+    }
+    mem[0] = dist;
 
-	return;
+    return;
 }
 
 
 void Gp_clip_test_gain_pit(
-		Word16 gain_pit,                      /* (i) Q14 : gain of quantized pitch                    */
-		Word16 mem[]                          /* (i/o)   : memory of gain of pitch clipping algorithm */
-		)
+        Word16 gain_pit,                      /* (i) Q14 : gain of quantized pitch                    */
+        Word16 mem[]                          /* (i/o)   : memory of gain of pitch clipping algorithm */
+        )
 {
-	Word16 gain;
-	Word32 L_tmp;
-	L_tmp = (29491 * mem[1])<<1;
-	L_tmp += (3277 * gain_pit)<<1;
+    Word16 gain;
+    Word32 L_tmp;
+    L_tmp = (29491 * mem[1])<<1;
+    L_tmp += (3277 * gain_pit)<<1;
 
-	gain = extract_h(L_tmp);
+    gain = extract_h(L_tmp);
 
-	if(gain < GAIN_PIT_MIN)
-	{
-		gain = GAIN_PIT_MIN;
-	}
-	mem[1] = gain;
-	return;
+    if(gain < GAIN_PIT_MIN)
+    {
+        gain = GAIN_PIT_MIN;
+    }
+    mem[1] = gain;
+    return;
 }
 
 
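The multiply-accumulate in Gp_clip_test_gain_pit() is a first-order smoother in disguise: 29491 and 3277 are 0.9 and 0.1 in Q15, so mem[1] tracks 0.9*mem[1] + 0.1*gain_pit, floored at GAIN_PIT_MIN. A floating-point sketch, assuming those semantics:

static float smooth_pitch_gain(float mem1, float gain_pit, float gain_min)
{
    float g = 0.9f * mem1 + 0.1f * gain_pit;  /* 29491/32768, 3277/32768 */
    return g < gain_min ? gain_min : g;       /* clamp like GAIN_PIT_MIN */
}
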
diff --git a/media/libstagefright/codecs/amrwbenc/src/homing.c b/media/libstagefright/codecs/amrwbenc/src/homing.c
index 565040f..a96e7db 100644
--- a/media/libstagefright/codecs/amrwbenc/src/homing.c
+++ b/media/libstagefright/codecs/amrwbenc/src/homing.c
@@ -29,18 +29,18 @@
 
 Word16 encoder_homing_frame_test(Word16 input_frame[])
 {
-	Word32 i;
-	Word16 j = 0;
+    Word32 i;
+    Word16 j = 0;
 
-	/* check 320 input samples for matching EHF_MASK: defined in e_homing.h */
-	for (i = 0; i < L_FRAME16k; i++)
-	{
-		j = (Word16) (input_frame[i] ^ EHF_MASK);
+    /* check 320 input samples for matching EHF_MASK: defined in e_homing.h */
+    for (i = 0; i < L_FRAME16k; i++)
+    {
+        j = (Word16) (input_frame[i] ^ EHF_MASK);
 
-		if (j)
-			break;
-	}
+        if (j)
+            break;
+    }
 
-	return (Word16) (!j);
+    return (Word16) (!j);
 }
 
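encoder_homing_frame_test() returns 1 only when every one of the 320 input samples equals EHF_MASK; the XOR-and-break loop is just an early-exit comparison. An equivalent sketch:

#include <stdint.h>

static int is_homing_frame(const int16_t *frame, int n, int16_t mask)
{
    for (int i = 0; i < n; i++)
        if (frame[i] != mask)      /* same as (frame[i] ^ mask) != 0 */
            return 0;
    return 1;
}
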
diff --git a/media/libstagefright/codecs/amrwbenc/src/hp400.c b/media/libstagefright/codecs/amrwbenc/src/hp400.c
index a6f9701..c658a92 100644
--- a/media/libstagefright/codecs/amrwbenc/src/hp400.c
+++ b/media/libstagefright/codecs/amrwbenc/src/hp400.c
@@ -50,56 +50,56 @@
 
 void Init_HP400_12k8(Word16 mem[])
 {
-	Set_zero(mem, 6);
+    Set_zero(mem, 6);
 }
 
 
 void HP400_12k8(
-		Word16 signal[],                      /* input signal / output is divided by 16 */
-		Word16 lg,                            /* lenght of signal    */
-		Word16 mem[]                          /* filter memory [6]   */
-	       )
+        Word16 signal[],                      /* input signal / output is divided by 16 */
+        Word16 lg,                            /* lenght of signal    */
+        Word16 mem[]                          /* filter memory [6]   */
+        )
 {
-	Word16  x2;
-	Word16 y2_hi, y2_lo, y1_hi, y1_lo, x0, x1;
-	Word32 L_tmp;
-	Word32 num;
-	y2_hi = *mem++;
-	y2_lo = *mem++;
-	y1_hi = *mem++;
-	y1_lo = *mem++;
-	x0 = *mem++;
-	x1 = *mem;
-	num = (Word32)lg;
-	do
-	{
-		x2 = x1;
-		x1 = x0;
-		x0 = *signal;
-		/* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2]  */
-		/* + a[1]*y[i-1] + a[2] * y[i-2];  */
-		L_tmp = 8192L;                    /* rounding to maximise precision */
-		L_tmp += y1_lo * a[1];
-		L_tmp += y2_lo * a[2];
-		L_tmp = L_tmp >> 14;
-		L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2)* b[0] + x1 * b[1]) << 1;
-		L_tmp <<= 1;           /* coeff Q12 --> Q13 */
-		y2_hi = y1_hi;
-		y2_lo = y1_lo;
-		y1_hi = (Word16)(L_tmp>>16);
-		y1_lo = (Word16)((L_tmp & 0xffff)>>1);
+    Word16  x2;
+    Word16 y2_hi, y2_lo, y1_hi, y1_lo, x0, x1;
+    Word32 L_tmp;
+    Word32 num;
+    y2_hi = *mem++;
+    y2_lo = *mem++;
+    y1_hi = *mem++;
+    y1_lo = *mem++;
+    x0 = *mem++;
+    x1 = *mem;
+    num = (Word32)lg;
+    do
+    {
+        x2 = x1;
+        x1 = x0;
+        x0 = *signal;
+        /* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2]  */
+        /* + a[1]*y[i-1] + a[2] * y[i-2];  */
+        L_tmp = 8192L;                    /* rounding to maximise precision */
+        L_tmp += y1_lo * a[1];
+        L_tmp += y2_lo * a[2];
+        L_tmp = L_tmp >> 14;
+        L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2)* b[0] + x1 * b[1]) << 1;
+        L_tmp <<= 1;           /* coeff Q12 --> Q13 */
+        y2_hi = y1_hi;
+        y2_lo = y1_lo;
+        y1_hi = (Word16)(L_tmp>>16);
+        y1_lo = (Word16)((L_tmp & 0xffff)>>1);
 
-		/* signal is divided by 16 to avoid overflow in energy computation */
-		*signal++ = (L_tmp + 0x8000) >> 16;
-	}while(--num !=0);
+        /* signal is divided by 16 to avoid overflow in energy computation */
+        *signal++ = (L_tmp + 0x8000) >> 16;
+    }while(--num !=0);
 
-	*mem-- = x1;
-	*mem-- = x0;
-	*mem-- = y1_lo;
-	*mem-- = y1_hi;
-	*mem-- = y2_lo;
-	*mem   = y2_hi;
-	return;
+    *mem-- = x1;
+    *mem-- = x0;
+    *mem-- = y1_lo;
+    *mem-- = y1_hi;
+    *mem-- = y2_lo;
+    *mem   = y2_hi;
+    return;
 }
 
 
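HP400_12k8() (and HP50_12k8() and Hp_wsp() below) keeps each 32-bit filter state as a hi/lo pair of Word16s — the top 16 bits plus the next 15 — so the recursion can be formed with 16x16 multiplies on 16-bit hardware. A sketch of the pack/unpack identities, not codec code:

#include <assert.h>
#include <stdint.h>

static void split32(int32_t y, int16_t *hi, int16_t *lo)
{
    *hi = (int16_t)(y >> 16);            /* top 16 bits  */
    *lo = (int16_t)((y & 0xffff) >> 1);  /* next 15 bits */
}

static int32_t join32(int16_t hi, int16_t lo)
{
    return ((int32_t)hi << 16) + ((int32_t)lo << 1);  /* drops only bit 0 */
}

int main(void)
{
    int16_t hi, lo;
    split32(0x12345678, &hi, &lo);
    assert(join32(hi, lo) == 0x12345678);  /* even values round-trip */
    return 0;
}
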
diff --git a/media/libstagefright/codecs/amrwbenc/src/hp50.c b/media/libstagefright/codecs/amrwbenc/src/hp50.c
index c1c7b83..807d672 100644
--- a/media/libstagefright/codecs/amrwbenc/src/hp50.c
+++ b/media/libstagefright/codecs/amrwbenc/src/hp50.c
@@ -17,7 +17,7 @@
 /***********************************************************************
 *      File: hp50.c                                                     *
 *                                                                       *
-*	   Description:                                                 *
+*      Description:                                                 *
 * 2nd order high pass filter with cut off frequency at 31 Hz.           *
 * Designed with cheby2 function in MATLAB.                              *
 * Optimized for fixed-point to get the following frequency response:    *
@@ -51,56 +51,56 @@
 
 void Init_HP50_12k8(Word16 mem[])
 {
-	Set_zero(mem, 6);
+    Set_zero(mem, 6);
 }
 
 
 void HP50_12k8(
-		Word16 signal[],                      /* input/output signal */
-		Word16 lg,                            /* lenght of signal    */
-		Word16 mem[]                          /* filter memory [6]   */
-	      )
+        Word16 signal[],                      /* input/output signal */
+        Word16 lg,                            /* length of signal    */
+        Word16 mem[]                          /* filter memory [6]   */
+        )
 {
-	Word16 x2;
-	Word16 y2_hi, y2_lo, y1_hi, y1_lo, x0, x1;
-	Word32 L_tmp;
-	Word32 num;
+    Word16 x2;
+    Word16 y2_hi, y2_lo, y1_hi, y1_lo, x0, x1;
+    Word32 L_tmp;
+    Word32 num;
 
-	y2_hi = *mem++;
-	y2_lo = *mem++;
-	y1_hi = *mem++;
-	y1_lo = *mem++;
-	x0 = *mem++;
-	x1 = *mem;
-	num = (Word32)lg;
-	do
-	{
-		x2 = x1;
-		x1 = x0;
-		x0 = *signal;
-		/* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2]  */
-		/* + a[1]*y[i-1] + a[2] * y[i-2];  */
-		L_tmp = 8192 ;                    /* rounding to maximise precision */
-		L_tmp += y1_lo * a[1];
-		L_tmp += y2_lo * a[2];
-		L_tmp = L_tmp >> 14;
-		L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2) * b[0] + x1 * b[1]) << 1;
-		L_tmp <<= 2;           /* coeff Q12 --> Q13 */
-		y2_hi = y1_hi;
-		y2_lo = y1_lo;
-		y1_hi = (Word16)(L_tmp>>16);
-		y1_lo = (Word16)((L_tmp & 0xffff)>>1);
-		*signal++ = extract_h((L_add((L_tmp<<1), 0x8000)));
-	}while(--num !=0);
+    y2_hi = *mem++;
+    y2_lo = *mem++;
+    y1_hi = *mem++;
+    y1_lo = *mem++;
+    x0 = *mem++;
+    x1 = *mem;
+    num = (Word32)lg;
+    do
+    {
+        x2 = x1;
+        x1 = x0;
+        x0 = *signal;
+        /* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2]  */
+        /* + a[1]*y[i-1] + a[2] * y[i-2];  */
+        L_tmp = 8192 ;                    /* rounding to maximise precision */
+        L_tmp += y1_lo * a[1];
+        L_tmp += y2_lo * a[2];
+        L_tmp = L_tmp >> 14;
+        L_tmp += (y1_hi * a[1] + y2_hi * a[2] + (x0 + x2) * b[0] + x1 * b[1]) << 1;
+        L_tmp <<= 2;           /* coeff Q12 --> Q13 */
+        y2_hi = y1_hi;
+        y2_lo = y1_lo;
+        y1_hi = (Word16)(L_tmp>>16);
+        y1_lo = (Word16)((L_tmp & 0xffff)>>1);
+        *signal++ = extract_h((L_add((L_tmp<<1), 0x8000)));
+    }while(--num !=0);
 
-	*mem-- = x1;
-	*mem-- = x0;
-	*mem-- = y1_lo;
-	*mem-- = y1_hi;
-	*mem-- = y2_lo;
-	*mem-- = y2_hi;
+    *mem-- = x1;
+    *mem-- = x0;
+    *mem-- = y1_lo;
+    *mem-- = y1_hi;
+    *mem-- = y2_lo;
+    *mem-- = y2_hi;
 
-	return;
+    return;
 }
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/hp6k.c b/media/libstagefright/codecs/amrwbenc/src/hp6k.c
index 8e66eb0..0baf612 100644
--- a/media/libstagefright/codecs/amrwbenc/src/hp6k.c
+++ b/media/libstagefright/codecs/amrwbenc/src/hp6k.c
@@ -17,10 +17,10 @@
 /***********************************************************************
 *       File: hp6k.c                                                    *
 *                                                                       *
-*	Description:15th order band pass 6kHz to 7kHz FIR filter        *
+*   Description: 15th order band-pass 6kHz to 7kHz FIR filter       *
 *       frequency: 4kHz   5kHz  5.5kHz  6kHz  6.5kHz  7kHz 7.5kHz 8kHz  *
-*	dB loss:  -60dB  -45dB  -13dB   -3dB   0dB    -3dB -13dB  -45dB *
-*	                                                                *
+*   dB loss:  -60dB  -45dB  -13dB   -3dB   0dB    -3dB -13dB  -45dB *
+*                                                                   *
 ************************************************************************/
 
 #include "typedef.h"
@@ -34,58 +34,58 @@
 
 Word16 fir_6k_7k[L_FIR] =
 {
-	-32, 47, 32, -27, -369,
-	1122, -1421, 0, 3798, -8880,
-	12349, -10984, 3548, 7766, -18001,
-	22118, -18001, 7766, 3548, -10984,
-	12349, -8880, 3798, 0, -1421,
-	1122, -369, -27, 32, 47,
-	-32
+    -32, 47, 32, -27, -369,
+    1122, -1421, 0, 3798, -8880,
+    12349, -10984, 3548, 7766, -18001,
+    22118, -18001, 7766, 3548, -10984,
+    12349, -8880, 3798, 0, -1421,
+    1122, -369, -27, 32, 47,
+    -32
 };
 
 
 void Init_Filt_6k_7k(Word16 mem[])         /* mem[30] */
 {
-	Set_zero(mem, L_FIR - 1);
-	return;
+    Set_zero(mem, L_FIR - 1);
+    return;
 }
 
 void Filt_6k_7k(
-		Word16 signal[],                      /* input:  signal                  */
-		Word16 lg,                            /* input:  length of input         */
-		Word16 mem[]                          /* in/out: memory (size=30)        */
-	       )
+        Word16 signal[],                      /* input:  signal                  */
+        Word16 lg,                            /* input:  length of input         */
+        Word16 mem[]                          /* in/out: memory (size=30)        */
+        )
 {
-	Word16 x[L_SUBFR16k + (L_FIR - 1)];
-	Word32 i, L_tmp;
+    Word16 x[L_SUBFR16k + (L_FIR - 1)];
+    Word32 i, L_tmp;
 
-	Copy(mem, x, L_FIR - 1);
-	for (i = lg - 1; i >= 0; i--)
-	{
-		x[i + L_FIR - 1] = signal[i] >> 2;                         /* gain of filter = 4 */
-	}
-	for (i = 0; i < lg; i++)
-	{
-		L_tmp =  (x[i] + x[i+ 30]) * fir_6k_7k[0];
-		L_tmp += (x[i+1] + x[i + 29]) * fir_6k_7k[1];
-		L_tmp += (x[i+2] + x[i + 28]) * fir_6k_7k[2];
-		L_tmp += (x[i+3] + x[i + 27]) * fir_6k_7k[3];
-		L_tmp += (x[i+4] + x[i + 26]) * fir_6k_7k[4];
-		L_tmp += (x[i+5] + x[i + 25]) * fir_6k_7k[5];
-		L_tmp += (x[i+6] + x[i + 24]) * fir_6k_7k[6];
-		L_tmp += (x[i+7] + x[i + 23]) * fir_6k_7k[7];
-		L_tmp += (x[i+8] + x[i + 22]) * fir_6k_7k[8];
-		L_tmp += (x[i+9] + x[i + 21]) * fir_6k_7k[9];
-		L_tmp += (x[i+10] + x[i + 20]) * fir_6k_7k[10];
-		L_tmp += (x[i+11] + x[i + 19]) * fir_6k_7k[11];
-		L_tmp += (x[i+12] + x[i + 18]) * fir_6k_7k[12];
-		L_tmp += (x[i+13] + x[i + 17]) * fir_6k_7k[13];
-		L_tmp += (x[i+14] + x[i + 16]) * fir_6k_7k[14];
-		L_tmp += (x[i+15]) * fir_6k_7k[15];
-		signal[i] = (L_tmp + 0x4000) >> 15;
-	}
+    Copy(mem, x, L_FIR - 1);
+    for (i = lg - 1; i >= 0; i--)
+    {
+        x[i + L_FIR - 1] = signal[i] >> 2;                         /* gain of filter = 4 */
+    }
+    for (i = 0; i < lg; i++)
+    {
+        L_tmp =  (x[i] + x[i+ 30]) * fir_6k_7k[0];
+        L_tmp += (x[i+1] + x[i + 29]) * fir_6k_7k[1];
+        L_tmp += (x[i+2] + x[i + 28]) * fir_6k_7k[2];
+        L_tmp += (x[i+3] + x[i + 27]) * fir_6k_7k[3];
+        L_tmp += (x[i+4] + x[i + 26]) * fir_6k_7k[4];
+        L_tmp += (x[i+5] + x[i + 25]) * fir_6k_7k[5];
+        L_tmp += (x[i+6] + x[i + 24]) * fir_6k_7k[6];
+        L_tmp += (x[i+7] + x[i + 23]) * fir_6k_7k[7];
+        L_tmp += (x[i+8] + x[i + 22]) * fir_6k_7k[8];
+        L_tmp += (x[i+9] + x[i + 21]) * fir_6k_7k[9];
+        L_tmp += (x[i+10] + x[i + 20]) * fir_6k_7k[10];
+        L_tmp += (x[i+11] + x[i + 19]) * fir_6k_7k[11];
+        L_tmp += (x[i+12] + x[i + 18]) * fir_6k_7k[12];
+        L_tmp += (x[i+13] + x[i + 17]) * fir_6k_7k[13];
+        L_tmp += (x[i+14] + x[i + 16]) * fir_6k_7k[14];
+        L_tmp += (x[i+15]) * fir_6k_7k[15];
+        signal[i] = (L_tmp + 0x4000) >> 15;
+    }
 
-	Copy(x + lg, mem, L_FIR - 1);
+    Copy(x + lg, mem, L_FIR - 1);
 
 }
 
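Filt_6k_7k() exploits the symmetry of fir_6k_7k[] (coefficient k equals coefficient 30-k), pairing x[i+k] with x[i+30-k] so the 31-tap filter costs 16 multiplies per sample. A generic sketch of that folding for an odd-length symmetric FIR, names illustrative:

#include <stdint.h>

static int32_t sym_fir_sample(const int16_t *x, const int16_t *coef, int taps)
{
    /* taps must be odd; x points at the oldest of `taps` samples */
    int32_t acc = 0;
    int half = taps / 2;
    for (int k = 0; k < half; k++)
        acc += (int32_t)(x[k] + x[taps - 1 - k]) * coef[k];  /* folded pair */
    acc += (int32_t)x[half] * coef[half];                    /* centre tap  */
    return acc;
}
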
diff --git a/media/libstagefright/codecs/amrwbenc/src/hp_wsp.c b/media/libstagefright/codecs/amrwbenc/src/hp_wsp.c
index bc1ec49..f0347cb 100644
--- a/media/libstagefright/codecs/amrwbenc/src/hp_wsp.c
+++ b/media/libstagefright/codecs/amrwbenc/src/hp_wsp.c
@@ -48,101 +48,101 @@
 /* Initialization of static values */
 void Init_Hp_wsp(Word16 mem[])
 {
-	Set_zero(mem, 9);
+    Set_zero(mem, 9);
 
-	return;
+    return;
 }
 
 void scale_mem_Hp_wsp(Word16 mem[], Word16 exp)
 {
-	Word32 i;
-	Word32 L_tmp;
+    Word32 i;
+    Word32 L_tmp;
 
-	for (i = 0; i < 6; i += 2)
-	{
-		L_tmp = ((mem[i] << 16) + (mem[i + 1]<<1));
-		L_tmp = L_shl(L_tmp, exp);
-		mem[i] = L_tmp >> 16;
-		mem[i + 1] = (L_tmp & 0xffff)>>1;
-	}
+    for (i = 0; i < 6; i += 2)
+    {
+        L_tmp = ((mem[i] << 16) + (mem[i + 1]<<1));
+        L_tmp = L_shl(L_tmp, exp);
+        mem[i] = L_tmp >> 16;
+        mem[i + 1] = (L_tmp & 0xffff)>>1;
+    }
 
-	for (i = 6; i < 9; i++)
-	{
-		L_tmp = L_deposit_h(mem[i]);       /* x[i] */
-		L_tmp = L_shl(L_tmp, exp);
-		mem[i] = vo_round(L_tmp);
-	}
+    for (i = 6; i < 9; i++)
+    {
+        L_tmp = L_deposit_h(mem[i]);       /* x[i] */
+        L_tmp = L_shl(L_tmp, exp);
+        mem[i] = vo_round(L_tmp);
+    }
 
-	return;
+    return;
 }
 
 
 void Hp_wsp(
-		Word16 wsp[],                         /* i   : wsp[]  signal       */
-		Word16 hp_wsp[],                      /* o   : hypass wsp[]        */
-		Word16 lg,                            /* i   : lenght of signal    */
-		Word16 mem[]                          /* i/o : filter memory [9]   */
-	   )
+        Word16 wsp[],                         /* i   : wsp[]  signal       */
+        Word16 hp_wsp[],                      /* o   : highpass wsp[]      */
+        Word16 lg,                            /* i   : length of signal    */
+        Word16 mem[]                          /* i/o : filter memory [9]   */
+        )
 {
-	Word16 x0, x1, x2, x3;
-	Word16 y3_hi, y3_lo, y2_hi, y2_lo, y1_hi, y1_lo;
-	Word32 i, L_tmp;
+    Word16 x0, x1, x2, x3;
+    Word16 y3_hi, y3_lo, y2_hi, y2_lo, y1_hi, y1_lo;
+    Word32 i, L_tmp;
 
-	y3_hi = mem[0];
-	y3_lo = mem[1];
-	y2_hi = mem[2];
-	y2_lo = mem[3];
-	y1_hi = mem[4];
-	y1_lo = mem[5];
-	x0 = mem[6];
-	x1 = mem[7];
-	x2 = mem[8];
+    y3_hi = mem[0];
+    y3_lo = mem[1];
+    y2_hi = mem[2];
+    y2_lo = mem[3];
+    y1_hi = mem[4];
+    y1_lo = mem[5];
+    x0 = mem[6];
+    x1 = mem[7];
+    x2 = mem[8];
 
-	for (i = 0; i < lg; i++)
-	{
-		x3 = x2;
-		x2 = x1;
-		x1 = x0;
-		x0 = wsp[i];
-		/* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2] + b[3]*x[i-3]  */
-		/* + a[1]*y[i-1] + a[2] * y[i-2]  + a[3]*y[i-3]  */
+    for (i = 0; i < lg; i++)
+    {
+        x3 = x2;
+        x2 = x1;
+        x1 = x0;
+        x0 = wsp[i];
+        /* y[i] = b[0]*x[i] + b[1]*x[i-1] + b140[2]*x[i-2] + b[3]*x[i-3]  */
+        /* + a[1]*y[i-1] + a[2] * y[i-2]  + a[3]*y[i-3]  */
 
-		L_tmp = 16384L;                    /* rounding to maximise precision */
-		L_tmp += (y1_lo * a[1])<<1;
-		L_tmp += (y2_lo * a[2])<<1;
-		L_tmp += (y3_lo * a[3])<<1;
-		L_tmp = L_tmp >> 15;
-		L_tmp += (y1_hi * a[1])<<1;
-		L_tmp += (y2_hi * a[2])<<1;
-		L_tmp += (y3_hi * a[3])<<1;
-		L_tmp += (x0 * b[0])<<1;
-		L_tmp += (x1 * b[1])<<1;
-		L_tmp += (x2 * b[2])<<1;
-		L_tmp += (x3 * b[3])<<1;
+        L_tmp = 16384L;                    /* rounding to maximise precision */
+        L_tmp += (y1_lo * a[1])<<1;
+        L_tmp += (y2_lo * a[2])<<1;
+        L_tmp += (y3_lo * a[3])<<1;
+        L_tmp = L_tmp >> 15;
+        L_tmp += (y1_hi * a[1])<<1;
+        L_tmp += (y2_hi * a[2])<<1;
+        L_tmp += (y3_hi * a[3])<<1;
+        L_tmp += (x0 * b[0])<<1;
+        L_tmp += (x1 * b[1])<<1;
+        L_tmp += (x2 * b[2])<<1;
+        L_tmp += (x3 * b[3])<<1;
 
-		L_tmp = L_tmp << 2;
+        L_tmp = L_tmp << 2;
 
-		y3_hi = y2_hi;
-		y3_lo = y2_lo;
-		y2_hi = y1_hi;
-		y2_lo = y1_lo;
-		y1_hi = L_tmp >> 16;
-		y1_lo = (L_tmp & 0xffff) >>1;
+        y3_hi = y2_hi;
+        y3_lo = y2_lo;
+        y2_hi = y1_hi;
+        y2_lo = y1_lo;
+        y1_hi = L_tmp >> 16;
+        y1_lo = (L_tmp & 0xffff) >>1;
 
-		hp_wsp[i] = (L_tmp + 0x4000)>>15;
-	}
+        hp_wsp[i] = (L_tmp + 0x4000)>>15;
+    }
 
-	mem[0] = y3_hi;
-	mem[1] = y3_lo;
-	mem[2] = y2_hi;
-	mem[3] = y2_lo;
-	mem[4] = y1_hi;
-	mem[5] = y1_lo;
-	mem[6] = x0;
-	mem[7] = x1;
-	mem[8] = x2;
+    mem[0] = y3_hi;
+    mem[1] = y3_lo;
+    mem[2] = y2_hi;
+    mem[3] = y2_lo;
+    mem[4] = y1_hi;
+    mem[5] = y1_lo;
+    mem[6] = x0;
+    mem[7] = x1;
+    mem[8] = x2;
 
-	return;
+    return;
 }
 
 
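scale_mem_Hp_wsp() renormalizes the filter state in place: the three hi/lo pairs are rejoined to 32 bits, shifted by exp, and split again, while the three plain input samples are shifted directly. A stand-alone sketch under the assumption that saturation and rounding can be ignored (the real basic ops L_shl and vo_round handle both):

#include <stdint.h>

static int32_t shl_nosat(int32_t x, int e)   /* stand-in for L_shl */
{
    return e >= 0 ? x << e : x >> -e;        /* saturation omitted */
}

static void scale_state(int16_t *mem, int exp)
{
    for (int i = 0; i < 6; i += 2) {         /* three 32-bit y states */
        int32_t y = ((int32_t)mem[i] << 16) + ((int32_t)mem[i + 1] << 1);
        y = shl_nosat(y, exp);
        mem[i]     = (int16_t)(y >> 16);
        mem[i + 1] = (int16_t)((y & 0xffff) >> 1);
    }
    for (int i = 6; i < 9; i++)              /* three 16-bit x samples */
        mem[i] = (int16_t)shl_nosat(mem[i], exp);
}
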
diff --git a/media/libstagefright/codecs/amrwbenc/src/int_lpc.c b/media/libstagefright/codecs/amrwbenc/src/int_lpc.c
index 1119bc7..3d8b8cb 100644
--- a/media/libstagefright/codecs/amrwbenc/src/int_lpc.c
+++ b/media/libstagefright/codecs/amrwbenc/src/int_lpc.c
@@ -30,36 +30,36 @@
 
 
 void Int_isp(
-		Word16 isp_old[],                     /* input : isps from past frame              */
-		Word16 isp_new[],                     /* input : isps from present frame           */
-		Word16 frac[],                        /* input : fraction for 3 first subfr (Q15)  */
-		Word16 Az[]                           /* output: LP coefficients in 4 subframes    */
-	    )
+        Word16 isp_old[],                     /* input : isps from past frame              */
+        Word16 isp_new[],                     /* input : isps from present frame           */
+        Word16 frac[],                        /* input : fraction for 3 first subfr (Q15)  */
+        Word16 Az[]                           /* output: LP coefficients in 4 subframes    */
+        )
 {
-	Word32 i, k;
-	Word16 fac_old, fac_new;
-	Word16 isp[M];
-	Word32 L_tmp;
+    Word32 i, k;
+    Word16 fac_old, fac_new;
+    Word16 isp[M];
+    Word32 L_tmp;
 
-	for (k = 0; k < 3; k++)
-	{
-		fac_new = frac[k];
-		fac_old = (32767 - fac_new) + 1;  /* 1.0 - fac_new */
+    for (k = 0; k < 3; k++)
+    {
+        fac_new = frac[k];
+        fac_old = (32767 - fac_new) + 1;  /* 1.0 - fac_new */
 
-		for (i = 0; i < M; i++)
-		{
-			L_tmp = (isp_old[i] * fac_old)<<1;
-			L_tmp += (isp_new[i] * fac_new)<<1;
-			isp[i] = (L_tmp + 0x8000)>>16;
-		}
-		Isp_Az(isp, Az, M, 0);
-		Az += MP1;
-	}
+        for (i = 0; i < M; i++)
+        {
+            L_tmp = (isp_old[i] * fac_old)<<1;
+            L_tmp += (isp_new[i] * fac_new)<<1;
+            isp[i] = (L_tmp + 0x8000)>>16;
+        }
+        Isp_Az(isp, Az, M, 0);
+        Az += MP1;
+    }
 
-	/* 4th subframe: isp_new (frac=1.0) */
-	Isp_Az(isp_new, Az, M, 0);
+    /* 4th subframe: isp_new (frac=1.0) */
+    Isp_Az(isp_new, Az, M, 0);
 
-	return;
+    return;
 }
 
 
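Int_isp() is plain linear interpolation between last frame's ISPs and this frame's: subframe k uses weight frac[k] on the new vector and 1 - frac[k] on the old one, and the fourth subframe uses the new vector outright. Float reference with an assumed order of 16 (M in the encoder):

#define ORDER 16

static void interp_isp(const float *isp_old, const float *isp_new,
                       const float frac[3], float out[4][ORDER])
{
    for (int k = 0; k < 3; k++)
        for (int i = 0; i < ORDER; i++)
            out[k][i] = (1.0f - frac[k]) * isp_old[i]
                      + frac[k] * isp_new[i];
    for (int i = 0; i < ORDER; i++)
        out[3][i] = isp_new[i];   /* 4th subframe: frac = 1.0 */
}
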
diff --git a/media/libstagefright/codecs/amrwbenc/src/isp_az.c b/media/libstagefright/codecs/amrwbenc/src/isp_az.c
index 30a8bbd..62e29e7 100644
--- a/media/libstagefright/codecs/amrwbenc/src/isp_az.c
+++ b/media/libstagefright/codecs/amrwbenc/src/isp_az.c
@@ -35,132 +35,132 @@
 static void Get_isp_pol_16kHz(Word16 * isp, Word32 * f, Word16 n);
 
 void Isp_Az(
-		Word16 isp[],                         /* (i) Q15 : Immittance spectral pairs            */
-		Word16 a[],                           /* (o) Q12 : predictor coefficients (order = M)   */
-		Word16 m,
-		Word16 adaptive_scaling               /* (i) 0   : adaptive scaling disabled */
-		                                      /*     1   : adaptive scaling enabled  */
-	   )
+        Word16 isp[],                         /* (i) Q15 : Immittance spectral pairs            */
+        Word16 a[],                           /* (o) Q12 : predictor coefficients (order = M)   */
+        Word16 m,
+        Word16 adaptive_scaling               /* (i) 0   : adaptive scaling disabled */
+                                              /*     1   : adaptive scaling enabled  */
+        )
 {
-	Word32 i, j;
-	Word16 hi, lo;
-	Word32 f1[NC16k + 1], f2[NC16k];
-	Word16 nc;
-	Word32 t0;
-	Word16 q, q_sug;
-	Word32 tmax;
+    Word32 i, j;
+    Word16 hi, lo;
+    Word32 f1[NC16k + 1], f2[NC16k];
+    Word16 nc;
+    Word32 t0;
+    Word16 q, q_sug;
+    Word32 tmax;
 
-	nc = (m >> 1);
-	if(nc > 8)
-	{
-		Get_isp_pol_16kHz(&isp[0], f1, nc);
-		for (i = 0; i <= nc; i++)
-		{
-			f1[i] = f1[i] << 2;
-		}
-	} else
-		Get_isp_pol(&isp[0], f1, nc);
+    nc = (m >> 1);
+    if(nc > 8)
+    {
+        Get_isp_pol_16kHz(&isp[0], f1, nc);
+        for (i = 0; i <= nc; i++)
+        {
+            f1[i] = f1[i] << 2;
+        }
+    } else
+        Get_isp_pol(&isp[0], f1, nc);
 
-	if (nc > 8)
-	{
-		Get_isp_pol_16kHz(&isp[1], f2, (nc - 1));
-		for (i = 0; i <= nc - 1; i++)
-		{
-			f2[i] = f2[i] << 2;
-		}
-	} else
-		Get_isp_pol(&isp[1], f2, (nc - 1));
+    if (nc > 8)
+    {
+        Get_isp_pol_16kHz(&isp[1], f2, (nc - 1));
+        for (i = 0; i <= nc - 1; i++)
+        {
+            f2[i] = f2[i] << 2;
+        }
+    } else
+        Get_isp_pol(&isp[1], f2, (nc - 1));
 
-	/*-----------------------------------------------------*
-	 *  Multiply F2(z) by (1 - z^-2)                       *
-	 *-----------------------------------------------------*/
+    /*-----------------------------------------------------*
+     *  Multiply F2(z) by (1 - z^-2)                       *
+     *-----------------------------------------------------*/
 
-	for (i = (nc - 1); i > 1; i--)
-	{
-		f2[i] = vo_L_sub(f2[i], f2[i - 2]);          /* f2[i] -= f2[i-2]; */
-	}
+    for (i = (nc - 1); i > 1; i--)
+    {
+        f2[i] = vo_L_sub(f2[i], f2[i - 2]);          /* f2[i] -= f2[i-2]; */
+    }
 
-	/*----------------------------------------------------------*
-	 *  Scale F1(z) by (1+isp[m-1])  and  F2(z) by (1-isp[m-1]) *
-	 *----------------------------------------------------------*/
+    /*----------------------------------------------------------*
+     *  Scale F1(z) by (1+isp[m-1])  and  F2(z) by (1-isp[m-1]) *
+     *----------------------------------------------------------*/
 
-	for (i = 0; i < nc; i++)
-	{
-		/* f1[i] *= (1.0 + isp[M-1]); */
+    for (i = 0; i < nc; i++)
+    {
+        /* f1[i] *= (1.0 + isp[M-1]); */
 
-		hi = f1[i] >> 16;
-		lo = (f1[i] & 0xffff)>>1;
+        hi = f1[i] >> 16;
+        lo = (f1[i] & 0xffff)>>1;
 
-		t0 = Mpy_32_16(hi, lo, isp[m - 1]);
-		f1[i] = vo_L_add(f1[i], t0);
+        t0 = Mpy_32_16(hi, lo, isp[m - 1]);
+        f1[i] = vo_L_add(f1[i], t0);
 
-		/* f2[i] *= (1.0 - isp[M-1]); */
+        /* f2[i] *= (1.0 - isp[M-1]); */
 
-		hi = f2[i] >> 16;
-		lo = (f2[i] & 0xffff)>>1;
-		t0 = Mpy_32_16(hi, lo, isp[m - 1]);
-		f2[i] = vo_L_sub(f2[i], t0);
-	}
+        hi = f2[i] >> 16;
+        lo = (f2[i] & 0xffff)>>1;
+        t0 = Mpy_32_16(hi, lo, isp[m - 1]);
+        f2[i] = vo_L_sub(f2[i], t0);
+    }
 
-	/*-----------------------------------------------------*
-	 *  A(z) = (F1(z)+F2(z))/2                             *
-	 *  F1(z) is symmetric and F2(z) is antisymmetric      *
-	 *-----------------------------------------------------*/
+    /*-----------------------------------------------------*
+     *  A(z) = (F1(z)+F2(z))/2                             *
+     *  F1(z) is symmetric and F2(z) is antisymmetric      *
+     *-----------------------------------------------------*/
 
-	/* a[0] = 1.0; */
-	a[0] = 4096;
-	tmax = 1;
-	for (i = 1, j = m - 1; i < nc; i++, j--)
-	{
-		/* a[i] = 0.5*(f1[i] + f2[i]); */
+    /* a[0] = 1.0; */
+    a[0] = 4096;
+    tmax = 1;
+    for (i = 1, j = m - 1; i < nc; i++, j--)
+    {
+        /* a[i] = 0.5*(f1[i] + f2[i]); */
 
-		t0 = vo_L_add(f1[i], f2[i]);          /* f1[i] + f2[i]             */
-		tmax |= L_abs(t0);
-		a[i] = (Word16)(vo_L_shr_r(t0, 12)); /* from Q23 to Q12 and * 0.5 */
+        t0 = vo_L_add(f1[i], f2[i]);          /* f1[i] + f2[i]             */
+        tmax |= L_abs(t0);
+        a[i] = (Word16)(vo_L_shr_r(t0, 12)); /* from Q23 to Q12 and * 0.5 */
 
-		/* a[j] = 0.5*(f1[i] - f2[i]); */
+        /* a[j] = 0.5*(f1[i] - f2[i]); */
 
-		t0 = vo_L_sub(f1[i], f2[i]);          /* f1[i] - f2[i]             */
-		tmax |= L_abs(t0);
-		a[j] = (Word16)(vo_L_shr_r(t0, 12)); /* from Q23 to Q12 and * 0.5 */
-	}
+        t0 = vo_L_sub(f1[i], f2[i]);          /* f1[i] - f2[i]             */
+        tmax |= L_abs(t0);
+        a[j] = (Word16)(vo_L_shr_r(t0, 12)); /* from Q23 to Q12 and * 0.5 */
+    }
 
-	/* rescale data if overflow has occured and reprocess the loop */
-	if(adaptive_scaling == 1)
-		q = 4 - norm_l(tmax);        /* adaptive scaling enabled */
-	else
-		q = 0;                           /* adaptive scaling disabled */
+    /* rescale data if overflow has occurred and reprocess the loop */
+    if(adaptive_scaling == 1)
+        q = 4 - norm_l(tmax);        /* adaptive scaling enabled */
+    else
+        q = 0;                           /* adaptive scaling disabled */
 
-	if (q > 0)
-	{
-		q_sug = (12 + q);
-		for (i = 1, j = m - 1; i < nc; i++, j--)
-		{
-			/* a[i] = 0.5*(f1[i] + f2[i]); */
-			t0 = vo_L_add(f1[i], f2[i]);          /* f1[i] + f2[i]             */
-			a[i] = (Word16)(vo_L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
+    if (q > 0)
+    {
+        q_sug = (12 + q);
+        for (i = 1, j = m - 1; i < nc; i++, j--)
+        {
+            /* a[i] = 0.5*(f1[i] + f2[i]); */
+            t0 = vo_L_add(f1[i], f2[i]);          /* f1[i] + f2[i]             */
+            a[i] = (Word16)(vo_L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
 
-			/* a[j] = 0.5*(f1[i] - f2[i]); */
-			t0 = vo_L_sub(f1[i], f2[i]);          /* f1[i] - f2[i]             */
-			a[j] = (Word16)(vo_L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
-		}
-		a[0] = shr(a[0], q);
-	}
-	else
-	{
-		q_sug = 12;
-		q     = 0;
-	}
-	/* a[NC] = 0.5*f1[NC]*(1.0 + isp[M-1]); */
-	hi = f1[nc] >> 16;
-	lo = (f1[nc] & 0xffff)>>1;
-	t0 = Mpy_32_16(hi, lo, isp[m - 1]);
-	t0 = vo_L_add(f1[nc], t0);
-	a[nc] = (Word16)(L_shr_r(t0, q_sug));    /* from Q23 to Q12 and * 0.5 */
-	/* a[m] = isp[m-1]; */
+            /* a[j] = 0.5*(f1[i] - f2[i]); */
+            t0 = vo_L_sub(f1[i], f2[i]);          /* f1[i] - f2[i]             */
+            a[j] = (Word16)(vo_L_shr_r(t0, q_sug)); /* from Q23 to Q12 and * 0.5 */
+        }
+        a[0] = shr(a[0], q);
+    }
+    else
+    {
+        q_sug = 12;
+        q     = 0;
+    }
+    /* a[NC] = 0.5*f1[NC]*(1.0 + isp[M-1]); */
+    hi = f1[nc] >> 16;
+    lo = (f1[nc] & 0xffff)>>1;
+    t0 = Mpy_32_16(hi, lo, isp[m - 1]);
+    t0 = vo_L_add(f1[nc], t0);
+    a[nc] = (Word16)(L_shr_r(t0, q_sug));    /* from Q23 to Q12 and * 0.5 */
+    /* a[m] = isp[m-1]; */
 
-	a[m] = vo_shr_r(isp[m - 1], (3 + q));           /* from Q15 to Q12          */
-	return;
+    a[m] = vo_shr_r(isp[m - 1], (3 + q));           /* from Q15 to Q12          */
+    return;
 }
 
 /*-----------------------------------------------------------*
@@ -185,63 +185,63 @@
 
 static void Get_isp_pol(Word16 * isp, Word32 * f, Word16 n)
 {
-	Word16 hi, lo;
-	Word32 i, j, t0;
-	/* All computation in Q23 */
+    Word16 hi, lo;
+    Word32 i, j, t0;
+    /* All computation in Q23 */
 
-	f[0] = vo_L_mult(4096, 1024);               /* f[0] = 1.0;        in Q23  */
-	f[1] = vo_L_mult(isp[0], -256);             /* f[1] = -2.0*isp[0] in Q23  */
+    f[0] = vo_L_mult(4096, 1024);               /* f[0] = 1.0;        in Q23  */
+    f[1] = vo_L_mult(isp[0], -256);             /* f[1] = -2.0*isp[0] in Q23  */
 
-	f += 2;                                  /* Advance f pointer          */
-	isp += 2;                                /* Advance isp pointer        */
-	for (i = 2; i <= n; i++)
-	{
-		*f = f[-2];
-		for (j = 1; j < i; j++, f--)
-		{
-			hi = f[-1]>>16;
-			lo = (f[-1] & 0xffff)>>1;
+    f += 2;                                  /* Advance f pointer          */
+    isp += 2;                                /* Advance isp pointer        */
+    for (i = 2; i <= n; i++)
+    {
+        *f = f[-2];
+        for (j = 1; j < i; j++, f--)
+        {
+            hi = f[-1]>>16;
+            lo = (f[-1] & 0xffff)>>1;
 
-			t0 = Mpy_32_16(hi, lo, *isp);  /* t0 = f[-1] * isp    */
-			t0 = t0 << 1;
-			*f = vo_L_sub(*f, t0);              /* *f -= t0            */
-			*f = vo_L_add(*f, f[-2]);           /* *f += f[-2]         */
-		}
-		*f -= (*isp << 9);           /* *f -= isp<<8        */
-		f += i;                            /* Advance f pointer   */
-		isp += 2;                          /* Advance isp pointer */
-	}
-	return;
+            t0 = Mpy_32_16(hi, lo, *isp);  /* t0 = f[-1] * isp    */
+            t0 = t0 << 1;
+            *f = vo_L_sub(*f, t0);              /* *f -= t0            */
+            *f = vo_L_add(*f, f[-2]);           /* *f += f[-2]         */
+        }
+        *f -= (*isp << 9);           /* *f -= isp<<8        */
+        f += i;                            /* Advance f pointer   */
+        isp += 2;                          /* Advance isp pointer */
+    }
+    return;
 }
 
 static void Get_isp_pol_16kHz(Word16 * isp, Word32 * f, Word16 n)
 {
-	Word16 hi, lo;
-	Word32 i, j, t0;
+    Word16 hi, lo;
+    Word32 i, j, t0;
 
-	/* All computation in Q23 */
-	f[0] = L_mult(4096, 256);                /* f[0] = 1.0;        in Q23  */
-	f[1] = L_mult(isp[0], -64);              /* f[1] = -2.0*isp[0] in Q23  */
+    /* All computation in Q23 */
+    f[0] = L_mult(4096, 256);                /* f[0] = 1.0;        in Q23  */
+    f[1] = L_mult(isp[0], -64);              /* f[1] = -2.0*isp[0] in Q23  */
 
-	f += 2;                                  /* Advance f pointer          */
-	isp += 2;                                /* Advance isp pointer        */
+    f += 2;                                  /* Advance f pointer          */
+    isp += 2;                                /* Advance isp pointer        */
 
-	for (i = 2; i <= n; i++)
-	{
-		*f = f[-2];
-		for (j = 1; j < i; j++, f--)
-		{
-			VO_L_Extract(f[-1], &hi, &lo);
-			t0 = Mpy_32_16(hi, lo, *isp);  /* t0 = f[-1] * isp    */
-			t0 = L_shl2(t0, 1);
-			*f = L_sub(*f, t0);              /* *f -= t0            */
-			*f = L_add(*f, f[-2]);           /* *f += f[-2]         */
-		}
-		*f = L_msu(*f, *isp, 64);            /* *f -= isp<<8        */
-		f += i;                            /* Advance f pointer   */
-		isp += 2;                          /* Advance isp pointer */
-	}
-	return;
+    for (i = 2; i <= n; i++)
+    {
+        *f = f[-2];
+        for (j = 1; j < i; j++, f--)
+        {
+            VO_L_Extract(f[-1], &hi, &lo);
+            t0 = Mpy_32_16(hi, lo, *isp);  /* t0 = f[-1] * isp    */
+            t0 = L_shl2(t0, 1);
+            *f = L_sub(*f, t0);              /* *f -= t0            */
+            *f = L_add(*f, f[-2]);           /* *f += f[-2]         */
+        }
+        *f = L_msu(*f, *isp, 64);            /* *f -= isp<<8        */
+        f += i;                            /* Advance f pointer   */
+        isp += 2;                          /* Advance isp pointer */
+    }
+    return;
 }
 
 
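The heart of Isp_Az() is the classic ISP-to-LPC reconstruction: F1(z) is symmetric, F2(z) antisymmetric, and A(z) = (F1(z) + F2(z))/2, so each pair of predictor coefficients comes from one sum and one difference. A floating-point sketch of that final combination only (the real code additionally scales f1/f2 by 1 +/- isp[m-1], handles a[nc] and a[m] separately, and can rescale adaptively to avoid Q12 overflow):

static void combine_halves(const float *f1, const float *f2, float *a, int m)
{
    int nc = m / 2;
    a[0] = 1.0f;
    for (int i = 1, j = m - 1; i < nc; i++, j--) {
        a[i] = 0.5f * (f1[i] + f2[i]);   /* symmetric part     */
        a[j] = 0.5f * (f1[i] - f2[i]);   /* antisymmetric part */
    }
}
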
diff --git a/media/libstagefright/codecs/amrwbenc/src/isp_isf.c b/media/libstagefright/codecs/amrwbenc/src/isp_isf.c
index b4ba408..56798e0 100644
--- a/media/libstagefright/codecs/amrwbenc/src/isp_isf.c
+++ b/media/libstagefright/codecs/amrwbenc/src/isp_isf.c
@@ -18,11 +18,11 @@
 *       File: isp_isf.c                                                *
 *                                                                      *
 *       Description:                                                   *
-*	Isp_isf   Transformation isp to isf                            *
-*	Isf_isp   Transformation isf to isp                            *
+*   Isp_isf   Transformation isp to isf                            *
+*   Isf_isp   Transformation isf to isp                            *
 *                                                                      *
-*	The transformation from isp[i] to isf[i] and isf[i] to isp[i]  *
-*	are approximated by a look-up table and interpolation          *
+*   The transformations from isp[i] to isf[i] and isf[i] to isp[i] *
+*   are approximated by a look-up table and interpolation          *
 *                                                                      *
 ************************************************************************/
 
@@ -31,59 +31,59 @@
 #include "isp_isf.tab"                     /* Look-up table for transformations */
 
 void Isp_isf(
-		Word16 isp[],                         /* (i) Q15 : isp[m] (range: -1<=val<1)                */
-		Word16 isf[],                         /* (o) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
-		Word16 m                              /* (i)     : LPC order                                */
-	    )
+        Word16 isp[],                         /* (i) Q15 : isp[m] (range: -1<=val<1)                */
+        Word16 isf[],                         /* (o) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
+        Word16 m                              /* (i)     : LPC order                                */
+        )
 {
-	Word32 i, ind;
-	Word32 L_tmp;
-	ind = 127;                               /* beging at end of table -1 */
-	for (i = (m - 1); i >= 0; i--)
-	{
-		if (i >= (m - 2))
-		{                                  /* m-2 is a constant */
-			ind = 127;                       /* beging at end of table -1 */
-		}
-		/* find value in table that is just greater than isp[i] */
-		while (table[ind] < isp[i])
-			ind--;
-		/* acos(isp[i])= ind*128 + ( ( isp[i]-table[ind] ) * slope[ind] )/2048 */
-		L_tmp = vo_L_mult(vo_sub(isp[i], table[ind]), slope[ind]);
-		isf[i] = vo_round((L_tmp << 4));   /* (isp[i]-table[ind])*slope[ind])>>11 */
-		isf[i] = add1(isf[i], (ind << 7));
-	}
-	isf[m - 1] = (isf[m - 1] >> 1);
-	return;
+    Word32 i, ind;
+    Word32 L_tmp;
+    ind = 127;                               /* begin at end of table - 1 */
+    for (i = (m - 1); i >= 0; i--)
+    {
+        if (i >= (m - 2))
+        {                                  /* m-2 is a constant */
+            ind = 127;                       /* begin at end of table - 1 */
+        }
+        /* find value in table that is just greater than isp[i] */
+        while (table[ind] < isp[i])
+            ind--;
+        /* acos(isp[i])= ind*128 + ( ( isp[i]-table[ind] ) * slope[ind] )/2048 */
+        L_tmp = vo_L_mult(vo_sub(isp[i], table[ind]), slope[ind]);
+        isf[i] = vo_round((L_tmp << 4));   /* ((isp[i]-table[ind])*slope[ind])>>11 */
+        isf[i] = add1(isf[i], (ind << 7));
+    }
+    isf[m - 1] = (isf[m - 1] >> 1);
+    return;
 }
 
 
 void Isf_isp(
-		Word16 isf[],                         /* (i) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
-		Word16 isp[],                         /* (o) Q15 : isp[m] (range: -1<=val<1)                */
-		Word16 m                              /* (i)     : LPC order                                */
-	    )
+        Word16 isf[],                         /* (i) Q15 : isf[m] normalized (range: 0.0<=val<=0.5) */
+        Word16 isp[],                         /* (o) Q15 : isp[m] (range: -1<=val<1)                */
+        Word16 m                              /* (i)     : LPC order                                */
+        )
 {
-	Word16 offset;
-	Word32 i, ind, L_tmp;
+    Word16 offset;
+    Word32 i, ind, L_tmp;
 
-	for (i = 0; i < m - 1; i++)
-	{
-		isp[i] = isf[i];
-	}
-	isp[m - 1] = (isf[m - 1] << 1);
+    for (i = 0; i < m - 1; i++)
+    {
+        isp[i] = isf[i];
+    }
+    isp[m - 1] = (isf[m - 1] << 1);
 
-	for (i = 0; i < m; i++)
-	{
-		ind = (isp[i] >> 7);                      /* ind    = b7-b15 of isf[i] */
-		offset = (Word16) (isp[i] & 0x007f);      /* offset = b0-b6  of isf[i] */
+    for (i = 0; i < m; i++)
+    {
+        ind = (isp[i] >> 7);                      /* ind    = b7-b15 of isf[i] */
+        offset = (Word16) (isp[i] & 0x007f);      /* offset = b0-b6  of isf[i] */
 
-		/* isp[i] = table[ind]+ ((table[ind+1]-table[ind])*offset) / 128 */
-		L_tmp = vo_L_mult(vo_sub(table[ind + 1], table[ind]), offset);
-		isp[i] = add1(table[ind], (Word16)((L_tmp >> 8)));
-	}
+        /* isp[i] = table[ind]+ ((table[ind+1]-table[ind])*offset) / 128 */
+        L_tmp = vo_L_mult(vo_sub(table[ind + 1], table[ind]), offset);
+        isp[i] = add1(table[ind], (Word16)((L_tmp >> 8)));
+    }
 
-	return;
+    return;
 }
 
 
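The two hunks above convert between immittance spectral pairs (ISPs, cosine-domain values in Q15) and immittance spectral frequencies (ISFs, normalized to [0, 0.5] in Q15): Isp_isf walks the 129-entry cosine table down from the top and linearly interpolates an arccosine, while Isf_isp indexes the same table directly to take a cosine. A float reference for the mapping the tables approximate; this is an illustrative sketch (it assumes the table samples the cosine uniformly over [0, pi], as the "acos(isp[i]) = ind*128 + ..." comment suggests) and is not bit-exact:

#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* isp[i] = cos(2*pi*f_i); the ISF stores the normalized frequency f_i,
 * with the last entry halved exactly as the fixed-point >> 1 does. */
static void isp_isf_ref(const double *isp, double *isf, int m)
{
    int i;
    for (i = 0; i < m; i++)
        isf[i] = acos(isp[i]) / (2.0 * M_PI);   /* in [0, 0.5] */
    isf[m - 1] *= 0.5;
}

static void isf_isp_ref(const double *isf, double *isp, int m)
{
    int i;
    for (i = 0; i < m - 1; i++)
        isp[i] = cos(2.0 * M_PI * isf[i]);
    isp[m - 1] = cos(2.0 * M_PI * (2.0 * isf[m - 1]));  /* mirrors the << 1 */
}
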
diff --git a/media/libstagefright/codecs/amrwbenc/src/lag_wind.c b/media/libstagefright/codecs/amrwbenc/src/lag_wind.c
index 49c622c..527430b 100644
--- a/media/libstagefright/codecs/amrwbenc/src/lag_wind.c
+++ b/media/libstagefright/codecs/amrwbenc/src/lag_wind.c
@@ -17,8 +17,8 @@
 /***********************************************************************
 *      File: lag_wind.c                                                *
 *                                                                      *
-*	   Description: Lag_windows on autocorrelations                *
-*	                r[i] *= lag_wind[i]                            *
+*      Description: Lag_windows on autocorrelations                    *
+*                   r[i] *= lag_wind[i]                                *
 *                                                                      *
 ************************************************************************/
 
@@ -29,20 +29,20 @@
 
 
 void Lag_window(
-		Word16 r_h[],                         /* (i/o)   : Autocorrelations  (msb)          */
-		Word16 r_l[]                          /* (i/o)   : Autocorrelations  (lsb)          */
-	       )
+        Word16 r_h[],                         /* (i/o)   : Autocorrelations  (msb)          */
+        Word16 r_l[]                          /* (i/o)   : Autocorrelations  (lsb)          */
+           )
 {
-	Word32 i;
-	Word32 x;
+    Word32 i;
+    Word32 x;
 
-	for (i = 1; i <= M; i++)
-	{
-		x = Mpy_32(r_h[i], r_l[i], volag_h[i - 1], volag_l[i - 1]);
-		r_h[i] = x >> 16;
-		r_l[i] = (x & 0xffff)>>1;
-	}
-	return;
+    for (i = 1; i <= M; i++)
+    {
+        x = Mpy_32(r_h[i], r_l[i], volag_h[i - 1], volag_l[i - 1]);
+        r_h[i] = x >> 16;
+        r_l[i] = (x & 0xffff)>>1;
+    }
+    return;
 }
 
 
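Lag_window above multiplies each autocorrelation r[1..M], carried as a hi/lo Q31 pair, by a fixed window (volag_h/volag_l); this is the usual lag-windowing (bandwidth expansion) step that regularizes the autocorrelation before Levinson-Durbin. A float sketch of the operation with a generic Gaussian window; the window formula and parameters here are illustrative assumptions, not the codec's actual coefficients:

#include <math.h>

/* Apply w[i] = exp(-0.5 * (2*pi*f0*i/fs)^2) to r[1..m] in place. */
static void lag_window_ref(double *r, int m, double f0, double fs)
{
    int i;
    for (i = 1; i <= m; i++) {
        double x = 2.0 * 3.14159265358979323846 * f0 * (double)i / fs;
        r[i] *= exp(-0.5 * x * x);
    }
}
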
diff --git a/media/libstagefright/codecs/amrwbenc/src/levinson.c b/media/libstagefright/codecs/amrwbenc/src/levinson.c
index 4b2f8ed..9d5a3bd 100644
--- a/media/libstagefright/codecs/amrwbenc/src/levinson.c
+++ b/media/libstagefright/codecs/amrwbenc/src/levinson.c
@@ -21,7 +21,7 @@
 *                                                                      *
 ************************************************************************/
 /*---------------------------------------------------------------------------*
- *                         LEVINSON.C					     *
+ *                         LEVINSON.C                                        *
  *---------------------------------------------------------------------------*
  *                                                                           *
  *      LEVINSON-DURBIN algorithm in double precision                        *
@@ -96,154 +96,154 @@
 #define NC  (M/2)
 
 void Init_Levinson(
-		Word16 * mem                          /* output  :static memory (18 words) */
-		)
+        Word16 * mem                          /* output  :static memory (18 words) */
+        )
 {
-	Set_zero(mem, 18);                     /* old_A[0..M-1] = 0, old_rc[0..1] = 0 */
-	return;
+    Set_zero(mem, 18);                     /* old_A[0..M-1] = 0, old_rc[0..1] = 0 */
+    return;
 }
 
 
 void Levinson(
-		Word16 Rh[],                          /* (i)     : Rh[M+1] Vector of autocorrelations (msb) */
-		Word16 Rl[],                          /* (i)     : Rl[M+1] Vector of autocorrelations (lsb) */
-		Word16 A[],                           /* (o) Q12 : A[M]    LPC coefficients  (m = 16)       */
-		Word16 rc[],                          /* (o) Q15 : rc[M]   Reflection coefficients.         */
-		Word16 * mem                          /* (i/o)   :static memory (18 words)                  */
-	     )
+        Word16 Rh[],                          /* (i)     : Rh[M+1] Vector of autocorrelations (msb) */
+        Word16 Rl[],                          /* (i)     : Rl[M+1] Vector of autocorrelations (lsb) */
+        Word16 A[],                           /* (o) Q12 : A[M]    LPC coefficients  (m = 16)       */
+        Word16 rc[],                          /* (o) Q15 : rc[M]   Reflection coefficients.         */
+        Word16 * mem                          /* (i/o)   :static memory (18 words)                  */
+         )
 {
-	Word32 i, j;
-	Word16 hi, lo;
-	Word16 Kh, Kl;                         /* reflection coefficient; hi and lo           */
-	Word16 alp_h, alp_l, alp_exp;          /* Prediction gain; hi lo and exponent         */
-	Word16 Ah[M + 1], Al[M + 1];           /* LPC coef. in double prec.                   */
-	Word16 Anh[M + 1], Anl[M + 1];         /* LPC coef.for next iteration in double prec. */
-	Word32 t0, t1, t2;                     /* temporary variable                          */
-	Word16 *old_A, *old_rc;
+    Word32 i, j;
+    Word16 hi, lo;
+    Word16 Kh, Kl;                         /* reflection coefficient; hi and lo           */
+    Word16 alp_h, alp_l, alp_exp;          /* Prediction gain; hi lo and exponent         */
+    Word16 Ah[M + 1], Al[M + 1];           /* LPC coef. in double prec.                   */
+    Word16 Anh[M + 1], Anl[M + 1];         /* LPC coef.for next iteration in double prec. */
+    Word32 t0, t1, t2;                     /* temporary variable                          */
+    Word16 *old_A, *old_rc;
 
-	/* Last A(z) for case of unstable filter */
-	old_A = mem;
-	old_rc = mem + M;
+    /* Last A(z) for case of unstable filter */
+    old_A = mem;
+    old_rc = mem + M;
 
-	/* K = A[1] = -R[1] / R[0] */
+    /* K = A[1] = -R[1] / R[0] */
 
-	t1 = ((Rh[1] << 16) + (Rl[1] << 1));   /* R[1] in Q31 */
-	t2 = L_abs(t1);                        /* abs R[1]         */
-	t0 = Div_32(t2, Rh[0], Rl[0]);         /* R[1]/R[0] in Q31 */
-	if (t1 > 0)
-		t0 = -t0;                          /* -R[1]/R[0]       */
+    t1 = ((Rh[1] << 16) + (Rl[1] << 1));   /* R[1] in Q31 */
+    t2 = L_abs(t1);                        /* abs R[1]         */
+    t0 = Div_32(t2, Rh[0], Rl[0]);         /* R[1]/R[0] in Q31 */
+    if (t1 > 0)
+        t0 = -t0;                          /* -R[1]/R[0]       */
 
-	Kh = t0 >> 16;
-	Kl = (t0 & 0xffff)>>1;
-	rc[0] = Kh;
-	t0 = (t0 >> 4);                        /* A[1] in Q27      */
+    Kh = t0 >> 16;
+    Kl = (t0 & 0xffff)>>1;
+    rc[0] = Kh;
+    t0 = (t0 >> 4);                        /* A[1] in Q27      */
 
-	Ah[1] = t0 >> 16;
-	Al[1] = (t0 & 0xffff)>>1;
+    Ah[1] = t0 >> 16;
+    Al[1] = (t0 & 0xffff)>>1;
 
-	/* Alpha = R[0] * (1-K**2) */
-	t0 = Mpy_32(Kh, Kl, Kh, Kl);           /* K*K      in Q31 */
-	t0 = L_abs(t0);                        /* Some case <0 !! */
-	t0 = vo_L_sub((Word32) 0x7fffffffL, t0);  /* 1 - K*K  in Q31 */
+    /* Alpha = R[0] * (1-K**2) */
+    t0 = Mpy_32(Kh, Kl, Kh, Kl);           /* K*K      in Q31 */
+    t0 = L_abs(t0);                        /* Some case <0 !! */
+    t0 = vo_L_sub((Word32) 0x7fffffffL, t0);  /* 1 - K*K  in Q31 */
 
-	hi = t0 >> 16;
-	lo = (t0 & 0xffff)>>1;
+    hi = t0 >> 16;
+    lo = (t0 & 0xffff)>>1;
 
-	t0 = Mpy_32(Rh[0], Rl[0], hi, lo);     /* Alpha in Q31    */
+    t0 = Mpy_32(Rh[0], Rl[0], hi, lo);     /* Alpha in Q31    */
 
-	/* Normalize Alpha */
-	alp_exp = norm_l(t0);
-	t0 = (t0 << alp_exp);
+    /* Normalize Alpha */
+    alp_exp = norm_l(t0);
+    t0 = (t0 << alp_exp);
 
-	alp_h = t0 >> 16;
-	alp_l = (t0 & 0xffff)>>1;
-	/*--------------------------------------*
-	 * ITERATIONS  I=2 to M                 *
-	 *--------------------------------------*/
-	for (i = 2; i <= M; i++)
-	{
-		/* t0 = SUM ( R[j]*A[i-j] ,j=1,i-1 ) +  R[i] */
-		t0 = 0;
-		for (j = 1; j < i; j++)
-			t0 = vo_L_add(t0, Mpy_32(Rh[j], Rl[j], Ah[i - j], Al[i - j]));
+    alp_h = t0 >> 16;
+    alp_l = (t0 & 0xffff)>>1;
+    /*--------------------------------------*
+     * ITERATIONS  I=2 to M                 *
+     *--------------------------------------*/
+    for (i = 2; i <= M; i++)
+    {
+        /* t0 = SUM ( R[j]*A[i-j] ,j=1,i-1 ) +  R[i] */
+        t0 = 0;
+        for (j = 1; j < i; j++)
+            t0 = vo_L_add(t0, Mpy_32(Rh[j], Rl[j], Ah[i - j], Al[i - j]));
 
-		t0 = t0 << 4;                 /* result in Q27 -> convert to Q31 */
-		/* No overflow possible            */
-		t1 = ((Rh[i] << 16) + (Rl[i] << 1));
-		t0 = vo_L_add(t0, t1);                /* add R[i] in Q31                 */
+        t0 = t0 << 4;                 /* result in Q27 -> convert to Q31 */
+        /* No overflow possible            */
+        t1 = ((Rh[i] << 16) + (Rl[i] << 1));
+        t0 = vo_L_add(t0, t1);                /* add R[i] in Q31                 */
 
-		/* K = -t0 / Alpha */
-		t1 = L_abs(t0);
-		t2 = Div_32(t1, alp_h, alp_l);     /* abs(t0)/Alpha                   */
-		if (t0 > 0)
-			t2 = -t2;                   /* K =-t0/Alpha                    */
-		t2 = (t2 << alp_exp);           /* denormalize; compare to Alpha   */
+        /* K = -t0 / Alpha */
+        t1 = L_abs(t0);
+        t2 = Div_32(t1, alp_h, alp_l);     /* abs(t0)/Alpha                   */
+        if (t0 > 0)
+            t2 = -t2;                   /* K =-t0/Alpha                    */
+        t2 = (t2 << alp_exp);           /* denormalize; compare to Alpha   */
 
-		Kh = t2 >> 16;
-		Kl = (t2 & 0xffff)>>1;
+        Kh = t2 >> 16;
+        Kl = (t2 & 0xffff)>>1;
 
-		rc[i - 1] = Kh;
-		/* Test for unstable filter. If unstable keep old A(z) */
-		if (abs_s(Kh) > 32750)
-		{
-			A[0] = 4096;                    /* Ai[0] not stored (always 1.0) */
-			for (j = 0; j < M; j++)
-			{
-				A[j + 1] = old_A[j];
-			}
-			rc[0] = old_rc[0];             /* only two rc coefficients are needed */
-			rc[1] = old_rc[1];
-			return;
-		}
-		/*------------------------------------------*
-		 *  Compute new LPC coeff. -> An[i]         *
-		 *  An[j]= A[j] + K*A[i-j]     , j=1 to i-1 *
-		 *  An[i]= K                                *
-		 *------------------------------------------*/
-		for (j = 1; j < i; j++)
-		{
-			t0 = Mpy_32(Kh, Kl, Ah[i - j], Al[i - j]);
-			t0 = vo_L_add(t0, ((Ah[j] << 16) + (Al[j] << 1)));
-			Anh[j] = t0 >> 16;
-			Anl[j] = (t0 & 0xffff)>>1;
-		}
-		t2 = (t2 >> 4);                 /* t2 = K in Q31 ->convert to Q27  */
+        rc[i - 1] = Kh;
+        /* Test for unstable filter. If unstable keep old A(z) */
+        if (abs_s(Kh) > 32750)
+        {
+            A[0] = 4096;                    /* Ai[0] not stored (always 1.0) */
+            for (j = 0; j < M; j++)
+            {
+                A[j + 1] = old_A[j];
+            }
+            rc[0] = old_rc[0];             /* only two rc coefficients are needed */
+            rc[1] = old_rc[1];
+            return;
+        }
+        /*------------------------------------------*
+         *  Compute new LPC coeff. -> An[i]         *
+         *  An[j]= A[j] + K*A[i-j]     , j=1 to i-1 *
+         *  An[i]= K                                *
+         *------------------------------------------*/
+        for (j = 1; j < i; j++)
+        {
+            t0 = Mpy_32(Kh, Kl, Ah[i - j], Al[i - j]);
+            t0 = vo_L_add(t0, ((Ah[j] << 16) + (Al[j] << 1)));
+            Anh[j] = t0 >> 16;
+            Anl[j] = (t0 & 0xffff)>>1;
+        }
+        t2 = (t2 >> 4);                 /* t2 = K in Q31 ->convert to Q27  */
 
-		VO_L_Extract(t2, &Anh[i], &Anl[i]);   /* An[i] in Q27                    */
+        VO_L_Extract(t2, &Anh[i], &Anl[i]);   /* An[i] in Q27                    */
 
-		/* Alpha = Alpha * (1-K**2) */
-		t0 = Mpy_32(Kh, Kl, Kh, Kl);               /* K*K      in Q31 */
-		t0 = L_abs(t0);                            /* Some case <0 !! */
-		t0 = vo_L_sub((Word32) 0x7fffffffL, t0);   /* 1 - K*K  in Q31 */
-		hi = t0 >> 16;
-		lo = (t0 & 0xffff)>>1;
-		t0 = Mpy_32(alp_h, alp_l, hi, lo); /* Alpha in Q31    */
+        /* Alpha = Alpha * (1-K**2) */
+        t0 = Mpy_32(Kh, Kl, Kh, Kl);               /* K*K      in Q31 */
+        t0 = L_abs(t0);                            /* Some case <0 !! */
+        t0 = vo_L_sub((Word32) 0x7fffffffL, t0);   /* 1 - K*K  in Q31 */
+        hi = t0 >> 16;
+        lo = (t0 & 0xffff)>>1;
+        t0 = Mpy_32(alp_h, alp_l, hi, lo); /* Alpha in Q31    */
 
-		/* Normalize Alpha */
-		j = norm_l(t0);
-		t0 = (t0 << j);
-		alp_h = t0 >> 16;
-		alp_l = (t0 & 0xffff)>>1;
-		alp_exp += j;         /* Add normalization to alp_exp */
+        /* Normalize Alpha */
+        j = norm_l(t0);
+        t0 = (t0 << j);
+        alp_h = t0 >> 16;
+        alp_l = (t0 & 0xffff)>>1;
+        alp_exp += j;         /* Add normalization to alp_exp */
 
-		/* A[j] = An[j] */
-		for (j = 1; j <= i; j++)
-		{
-			Ah[j] = Anh[j];
-			Al[j] = Anl[j];
-		}
-	}
-	/* Truncate A[i] in Q27 to Q12 with rounding */
-	A[0] = 4096;
-	for (i = 1; i <= M; i++)
-	{
-		t0 = (Ah[i] << 16) + (Al[i] << 1);
-		old_A[i - 1] = A[i] = vo_round((t0 << 1));
-	}
-	old_rc[0] = rc[0];
-	old_rc[1] = rc[1];
+        /* A[j] = An[j] */
+        for (j = 1; j <= i; j++)
+        {
+            Ah[j] = Anh[j];
+            Al[j] = Anl[j];
+        }
+    }
+    /* Truncate A[i] in Q27 to Q12 with rounding */
+    A[0] = 4096;
+    for (i = 1; i <= M; i++)
+    {
+        t0 = (Ah[i] << 16) + (Al[i] << 1);
+        old_A[i - 1] = A[i] = vo_round((t0 << 1));
+    }
+    old_rc[0] = rc[0];
+    old_rc[1] = rc[1];
 
-	return;
+    return;
 }
 
 
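The Levinson hunk is whitespace-only, but the routine deserves a summary: it is the Levinson-Durbin recursion with every 32-bit value carried as a hi/lo 16-bit pair, a fallback to the previous frame's A(z) whenever a reflection coefficient gets too close to +/-1 (abs_s(Kh) > 32750, about 0.9995 in Q15), and output coefficients truncated to Q12 with A[0] = 4096. A double-precision reference of the same recursion, as a sketch; the Q formats and the exact stability threshold of the fixed-point version are deliberately left out:

#include <math.h>

/* r[0..m]: autocorrelations; a[0..m]: LPC polynomial, a[0] = 1;
 * rc[0..m-1]: reflection coefficients.  Returns -1 where the
 * fixed-point code would keep the previous frame's A(z). */
static int levinson_ref(const double *r, double *a, double *rc, int m)
{
    double err = r[0];
    int i, j;

    a[0] = 1.0;
    for (i = 1; i <= m; i++) {
        double acc = r[i];
        double k;

        for (j = 1; j < i; j++)
            acc += a[j] * r[i - j];
        k = -acc / err;
        if (fabs(k) >= 1.0)
            return -1;                       /* unstable filter */
        rc[i - 1] = k;
        a[i] = k;
        for (j = 1; j <= i / 2; j++) {       /* symmetric order update */
            double tmp = a[j] + k * a[i - j];
            a[i - j] += k * a[j];
            a[j] = tmp;
        }
        err *= 1.0 - k * k;                  /* prediction error shrinks */
    }
    return 0;
}
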
diff --git a/media/libstagefright/codecs/amrwbenc/src/log2.c b/media/libstagefright/codecs/amrwbenc/src/log2.c
index 0f65541..f14058e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/log2.c
+++ b/media/libstagefright/codecs/amrwbenc/src/log2.c
@@ -54,33 +54,33 @@
 *************************************************************************/
 
 void Log2_norm (
-		Word32 L_x,         /* (i) : input value (normalized)                    */
-		Word16 exp,         /* (i) : norm_l (L_x)                                */
-		Word16 *exponent,   /* (o) : Integer part of Log2.   (range: 0<=val<=30) */
-		Word16 *fraction    /* (o) : Fractional part of Log2. (range: 0<=val<1)  */
-	       )
+        Word32 L_x,         /* (i) : input value (normalized)                    */
+        Word16 exp,         /* (i) : norm_l (L_x)                                */
+        Word16 *exponent,   /* (o) : Integer part of Log2.   (range: 0<=val<=30) */
+        Word16 *fraction    /* (o) : Fractional part of Log2. (range: 0<=val<1)  */
+           )
 {
-	Word16 i, a, tmp;
-	Word32 L_y;
-	if (L_x <= (Word32) 0)
-	{
-		*exponent = 0;
-		*fraction = 0;
-		return;
-	}
-	*exponent = (30 - exp);
-	L_x = (L_x >> 9);
-	i = extract_h (L_x);                /* Extract b25-b31 */
-	L_x = (L_x >> 1);
-	a = (Word16)(L_x);                /* Extract b10-b24 of fraction */
-	a = (Word16)(a & (Word16)0x7fff);
-	i -= 32;
-	L_y = L_deposit_h (table[i]);       /* table[i] << 16        */
-	tmp = vo_sub(table[i], table[i + 1]); /* table[i] - table[i+1] */
-	L_y = vo_L_msu (L_y, tmp, a);          /* L_y -= tmp*a*2        */
-	*fraction = extract_h (L_y);
+    Word16 i, a, tmp;
+    Word32 L_y;
+    if (L_x <= (Word32) 0)
+    {
+        *exponent = 0;
+        *fraction = 0;
+        return;
+    }
+    *exponent = (30 - exp);
+    L_x = (L_x >> 9);
+    i = extract_h (L_x);                /* Extract b25-b31 */
+    L_x = (L_x >> 1);
+    a = (Word16)(L_x);                /* Extract b10-b24 of fraction */
+    a = (Word16)(a & (Word16)0x7fff);
+    i -= 32;
+    L_y = L_deposit_h (table[i]);       /* table[i] << 16        */
+    tmp = vo_sub(table[i], table[i + 1]); /* table[i] - table[i+1] */
+    L_y = vo_L_msu (L_y, tmp, a);          /* L_y -= tmp*a*2        */
+    *fraction = extract_h (L_y);
 
-	return;
+    return;
 }
 
 /*************************************************************************
@@ -96,15 +96,15 @@
 *************************************************************************/
 
 void Log2 (
-		Word32 L_x,         /* (i) : input value                                 */
-		Word16 *exponent,   /* (o) : Integer part of Log2.   (range: 0<=val<=30) */
-		Word16 *fraction    /* (o) : Fractional part of Log2. (range: 0<=val<1) */
-	  )
+        Word32 L_x,         /* (i) : input value                                 */
+        Word16 *exponent,   /* (o) : Integer part of Log2.   (range: 0<=val<=30) */
+        Word16 *fraction    /* (o) : Fractional part of Log2. (range: 0<=val<1) */
+      )
 {
-	Word16 exp;
+    Word16 exp;
 
-	exp = norm_l(L_x);
-	Log2_norm ((L_x << exp), exp, exponent, fraction);
+    exp = norm_l(L_x);
+    Log2_norm ((L_x << exp), exp, exponent, fraction);
 }
 
 
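Log2() splits the base-2 logarithm of a positive 32-bit value into an integer part (0..30) and a Q15 fraction; Log2_norm does the table work on a mantissa that norm_l() has already normalized. A float reference for the contract, as a sketch:

#include <math.h>
#include <stdint.h>

/* For x > 0: log2(x) == *exponent + *fraction, 0 <= *fraction < 1.
 * The codec returns the fraction in Q15; e.g. x = 1000 gives
 * exponent 9 and fraction ~0.9658 (~31647 in Q15). */
static void log2_ref(int32_t x, int *exponent, double *fraction)
{
    double l = log2((double)x);
    *exponent = (int)floor(l);
    *fraction = l - floor(l);
}
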
diff --git a/media/libstagefright/codecs/amrwbenc/src/lp_dec2.c b/media/libstagefright/codecs/amrwbenc/src/lp_dec2.c
index 1d5d076..9a9dd34 100644
--- a/media/libstagefright/codecs/amrwbenc/src/lp_dec2.c
+++ b/media/libstagefright/codecs/amrwbenc/src/lp_dec2.c
@@ -17,7 +17,7 @@
 /***********************************************************************
 *       File: lp_dec2.c                                                *
 *                                                                      *
-*	Description:Decimate a vector by 2 with 2nd order fir filter   *
+*       Description: Decimate a vector by 2 with 2nd order FIR filter  *
 *                                                                      *
 ************************************************************************/
 
@@ -33,36 +33,36 @@
 static Word16 h_fir[L_FIR] = {4260, 7536, 9175, 7536, 4260};
 
 void LP_Decim2(
-		Word16 x[],                           /* in/out: signal to process         */
-		Word16 l,                             /* input : size of filtering         */
-		Word16 mem[]                          /* in/out: memory (size=3)           */
-	      )
+        Word16 x[],                           /* in/out: signal to process         */
+        Word16 l,                             /* input : size of filtering         */
+        Word16 mem[]                          /* in/out: memory (size=3)           */
+          )
 {
-	Word16 *p_x, x_buf[L_FRAME + L_MEM];
-	Word32 i, j;
-	Word32 L_tmp;
-	/* copy initial filter states into buffer */
-	p_x = x_buf;
-	for (i = 0; i < L_MEM; i++)
-	{
-		*p_x++ = mem[i];
-		mem[i] = x[l - L_MEM + i];
-	}
-	for (i = 0; i < l; i++)
-	{
-		*p_x++ = x[i];
-	}
-	for (i = 0, j = 0; i < l; i += 2, j++)
-	{
-		p_x = &x_buf[i];
-		L_tmp  = ((*p_x++) * h_fir[0]);
-		L_tmp += ((*p_x++) * h_fir[1]);
-		L_tmp += ((*p_x++) * h_fir[2]);
-		L_tmp += ((*p_x++) * h_fir[3]);
-		L_tmp += ((*p_x++) * h_fir[4]);
-		x[j] = (L_tmp + 0x4000)>>15;
-	}
-	return;
+    Word16 *p_x, x_buf[L_FRAME + L_MEM];
+    Word32 i, j;
+    Word32 L_tmp;
+    /* copy initial filter states into buffer */
+    p_x = x_buf;
+    for (i = 0; i < L_MEM; i++)
+    {
+        *p_x++ = mem[i];
+        mem[i] = x[l - L_MEM + i];
+    }
+    for (i = 0; i < l; i++)
+    {
+        *p_x++ = x[i];
+    }
+    for (i = 0, j = 0; i < l; i += 2, j++)
+    {
+        p_x = &x_buf[i];
+        L_tmp  = ((*p_x++) * h_fir[0]);
+        L_tmp += ((*p_x++) * h_fir[1]);
+        L_tmp += ((*p_x++) * h_fir[2]);
+        L_tmp += ((*p_x++) * h_fir[3]);
+        L_tmp += ((*p_x++) * h_fir[4]);
+        x[j] = (L_tmp + 0x4000)>>15;
+    }
+    return;
 }
 
 
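LP_Decim2 halves the sampling rate in place with a 5-tap linear-phase FIR (the Q15 taps sum to 32767, i.e. unity gain at DC) and carries L_MEM = 3 input samples across calls so consecutive frames filter seamlessly. A self-contained sketch of the same scheme with exact-width types; the taps are the ones from this file, while the buffer bound is chosen for the sketch:

#include <stdint.h>

#define L_FIR 5
#define L_MEM (L_FIR - 2)   /* history carried between frames */

static const int16_t h_fir[L_FIR] = {4260, 7536, 9175, 7536, 4260};

/* Decimate x[0..l-1] by 2 into x[0..l/2-1]; mem[0..L_MEM-1] holds the
 * filter history.  Assumes l is even and at most 64. */
static void decim2_sketch(int16_t *x, int l, int16_t *mem)
{
    int16_t buf[64 + L_MEM];
    int i, j, k;

    for (i = 0; i < L_MEM; i++) {
        buf[i] = mem[i];
        mem[i] = x[l - L_MEM + i];      /* save tail for the next frame */
    }
    for (i = 0; i < l; i++)
        buf[L_MEM + i] = x[i];

    for (i = 0, j = 0; i < l; i += 2, j++) {
        int32_t acc = 0;
        for (k = 0; k < L_FIR; k++)
            acc += (int32_t)buf[i + k] * h_fir[k];
        x[j] = (int16_t)((acc + 0x4000) >> 15);   /* round Q15 to Q0 */
    }
}
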
diff --git a/media/libstagefright/codecs/amrwbenc/src/math_op.c b/media/libstagefright/codecs/amrwbenc/src/math_op.c
index 7affbb2..9d7c74e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/math_op.c
+++ b/media/libstagefright/codecs/amrwbenc/src/math_op.c
@@ -55,17 +55,17 @@
 |___________________________________________________________________________|
 */
 Word32 Isqrt(                              /* (o) Q31 : output value (range: 0<=val<1)         */
-		Word32 L_x                            /* (i) Q0  : input value  (range: 0<=val<=7fffffff) */
-	    )
+        Word32 L_x                            /* (i) Q0  : input value  (range: 0<=val<=7fffffff) */
+        )
 {
-	Word16 exp;
-	Word32 L_y;
-	exp = norm_l(L_x);
-	L_x = (L_x << exp);                 /* L_x is normalized */
-	exp = (31 - exp);
-	Isqrt_n(&L_x, &exp);
-	L_y = (L_x << exp);                 /* denormalization   */
-	return (L_y);
+    Word16 exp;
+    Word32 L_y;
+    exp = norm_l(L_x);
+    L_x = (L_x << exp);                 /* L_x is normalized */
+    exp = (31 - exp);
+    Isqrt_n(&L_x, &exp);
+    L_y = (L_x << exp);                 /* denormalization   */
+    return (L_y);
 }
 
 /*___________________________________________________________________________
@@ -90,43 +90,43 @@
 */
 static Word16 table_isqrt[49] =
 {
-	32767, 31790, 30894, 30070, 29309, 28602, 27945, 27330, 26755, 26214,
-	25705, 25225, 24770, 24339, 23930, 23541, 23170, 22817, 22479, 22155,
-	21845, 21548, 21263, 20988, 20724, 20470, 20225, 19988, 19760, 19539,
-	19326, 19119, 18919, 18725, 18536, 18354, 18176, 18004, 17837, 17674,
-	17515, 17361, 17211, 17064, 16921, 16782, 16646, 16514, 16384
+    32767, 31790, 30894, 30070, 29309, 28602, 27945, 27330, 26755, 26214,
+    25705, 25225, 24770, 24339, 23930, 23541, 23170, 22817, 22479, 22155,
+    21845, 21548, 21263, 20988, 20724, 20470, 20225, 19988, 19760, 19539,
+    19326, 19119, 18919, 18725, 18536, 18354, 18176, 18004, 17837, 17674,
+    17515, 17361, 17211, 17064, 16921, 16782, 16646, 16514, 16384
 };
 
 void Isqrt_n(
-		Word32 * frac,                        /* (i/o) Q31: normalized value (1.0 < frac <= 0.5) */
-		Word16 * exp                          /* (i/o)    : exponent (value = frac x 2^exponent) */
-	    )
+        Word32 * frac,                        /* (i/o) Q31: normalized value (0.5 <= frac < 1.0) */
+        Word16 * exp                          /* (i/o)    : exponent (value = frac x 2^exponent) */
+        )
 {
-	Word16 i, a, tmp;
+    Word16 i, a, tmp;
 
-	if (*frac <= (Word32) 0)
-	{
-		*exp = 0;
-		*frac = 0x7fffffffL;
-		return;
-	}
+    if (*frac <= (Word32) 0)
+    {
+        *exp = 0;
+        *frac = 0x7fffffffL;
+        return;
+    }
 
-	if((*exp & 1) == 1)                       /*If exponant odd -> shift right */
-		*frac = (*frac) >> 1;
+    if((*exp & 1) == 1)                       /* If exponent is odd -> shift right */
+        *frac = (*frac) >> 1;
 
-	*exp = negate((*exp - 1) >> 1);
+    *exp = negate((*exp - 1) >> 1);
 
-	*frac = (*frac >> 9);
-	i = extract_h(*frac);                  /* Extract b25-b31 */
-	*frac = (*frac >> 1);
-	a = (Word16)(*frac);                  /* Extract b10-b24 */
-	a = (Word16) (a & (Word16) 0x7fff);
-	i -= 16;
-	*frac = L_deposit_h(table_isqrt[i]);   /* table[i] << 16         */
-	tmp = vo_sub(table_isqrt[i], table_isqrt[i + 1]);      /* table[i] - table[i+1]) */
-	*frac = vo_L_msu(*frac, tmp, a);          /* frac -=  tmp*a*2       */
+    *frac = (*frac >> 9);
+    i = extract_h(*frac);                  /* Extract b25-b31 */
+    *frac = (*frac >> 1);
+    a = (Word16)(*frac);                  /* Extract b10-b24 */
+    a = (Word16) (a & (Word16) 0x7fff);
+    i -= 16;
+    *frac = L_deposit_h(table_isqrt[i]);   /* table[i] << 16         */
+    tmp = vo_sub(table_isqrt[i], table_isqrt[i + 1]);      /* table[i] - table[i+1] */
+    *frac = vo_L_msu(*frac, tmp, a);          /* frac -=  tmp*a*2       */
 
-	return;
+    return;
 }
 
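Isqrt_n rewrites the pair (frac, exp), where value = frac * 2^exp and frac is normalized in Q31, so that it represents 1/sqrt(value); halving an odd exponent first means the 49-entry table only has to cover one octave of the square root. A float reference for the contract, as a sketch for value > 0 (the fixed-point clamp of non-positive input to 0x7fffffff is omitted, and the output split below is one valid normalization, not necessarily the table's):

#include <math.h>

/* On entry: value = *frac * 2^*exp.  On exit the same pair represents
 * 1/sqrt(value), with *frac kept in (0.5, 1]. */
static void isqrt_n_ref(double *frac, int *exp)
{
    double r = 1.0 / sqrt(*frac * ldexp(1.0, *exp));
    int e = (int)ceil(log2(r));

    *exp = e;
    *frac = r * ldexp(1.0, -e);
}
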
 /*___________________________________________________________________________
@@ -149,34 +149,34 @@
 */
 static Word16 table_pow2[33] =
 {
-	16384, 16743, 17109, 17484, 17867, 18258, 18658, 19066, 19484, 19911,
-	20347, 20792, 21247, 21713, 22188, 22674, 23170, 23678, 24196, 24726,
-	25268, 25821, 26386, 26964, 27554, 28158, 28774, 29405, 30048, 30706,
-	31379, 32066, 32767
+    16384, 16743, 17109, 17484, 17867, 18258, 18658, 19066, 19484, 19911,
+    20347, 20792, 21247, 21713, 22188, 22674, 23170, 23678, 24196, 24726,
+    25268, 25821, 26386, 26964, 27554, 28158, 28774, 29405, 30048, 30706,
+    31379, 32066, 32767
 };
 
 Word32 Pow2(                               /* (o) Q0  : result       (range: 0<=val<=0x7fffffff) */
-		Word16 exponant,                      /* (i) Q0  : Integer part.      (range: 0<=val<=30)   */
-		Word16 fraction                       /* (i) Q15 : Fractionnal part.  (range: 0.0<=val<1.0) */
-	   )
+        Word16 exponant,                      /* (i) Q0  : Integer part.      (range: 0<=val<=30)   */
+        Word16 fraction                       /* (i) Q15 : Fractional part.   (range: 0.0<=val<1.0) */
+       )
 {
-	Word16 exp, i, a, tmp;
-	Word32 L_x;
+    Word16 exp, i, a, tmp;
+    Word32 L_x;
 
-	L_x = vo_L_mult(fraction, 32);            /* L_x = fraction<<6           */
-	i = extract_h(L_x);                    /* Extract b10-b16 of fraction */
-	L_x =L_x >> 1;
-	a = (Word16)(L_x);                    /* Extract b0-b9   of fraction */
-	a = (Word16) (a & (Word16) 0x7fff);
+    L_x = vo_L_mult(fraction, 32);            /* L_x = fraction<<6           */
+    i = extract_h(L_x);                    /* Extract b10-b16 of fraction */
+    L_x =L_x >> 1;
+    a = (Word16)(L_x);                    /* Extract b0-b9   of fraction */
+    a = (Word16) (a & (Word16) 0x7fff);
 
-	L_x = L_deposit_h(table_pow2[i]);      /* table[i] << 16        */
-	tmp = vo_sub(table_pow2[i], table_pow2[i + 1]);        /* table[i] - table[i+1] */
-	L_x -= (tmp * a)<<1;              /* L_x -= tmp*a*2        */
+    L_x = L_deposit_h(table_pow2[i]);      /* table[i] << 16        */
+    tmp = vo_sub(table_pow2[i], table_pow2[i + 1]);        /* table[i] - table[i+1] */
+    L_x -= (tmp * a)<<1;              /* L_x -= tmp*a*2        */
 
-	exp = vo_sub(30, exponant);
-	L_x = vo_L_shr_r(L_x, exp);
+    exp = vo_sub(30, exponant);
+    L_x = vo_L_shr_r(L_x, exp);
 
-	return (L_x);
+    return (L_x);
 }
 
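Pow2 evaluates 2^(exponant + fraction), with the integer part in 0..30 and the fraction in Q15, via the 33-entry table of 2^(i/32) above plus linear interpolation and a rounded shift back to Q0. A float reference with a worked example, as a sketch:

#include <math.h>
#include <stdint.h>

/* pow2_ref(10, 16384) -> round(2^10.5) = 1448, which is what
 * Pow2(10, 16384) approximates (16384/32768 = 0.5). */
static int32_t pow2_ref(int exponent, int fraction_q15)
{
    double f = (double)fraction_q15 / 32768.0;
    return (int32_t)floor(pow(2.0, (double)exponent + f) + 0.5);
}
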
 /*___________________________________________________________________________
@@ -194,25 +194,30 @@
 */
 
 Word32 Dot_product12(                      /* (o) Q31: normalized result (1 < val <= -1) */
-		Word16 x[],                           /* (i) 12bits: x vector                       */
-		Word16 y[],                           /* (i) 12bits: y vector                       */
-		Word16 lg,                            /* (i)    : vector length                     */
-		Word16 * exp                          /* (o)    : exponent of result (0..+30)       */
-		)
+        Word16 x[],                           /* (i) 12bits: x vector                       */
+        Word16 y[],                           /* (i) 12bits: y vector                       */
+        Word16 lg,                            /* (i)    : vector length                     */
+        Word16 * exp                          /* (o)    : exponent of result (0..+30)       */
+        )
 {
-	Word16 sft;
-	Word32 i, L_sum;
-	L_sum = 0;
-	for (i = 0; i < lg; i++)
-	{
-		L_sum += x[i] * y[i];
-	}
-	L_sum = (L_sum << 1) + 1;
-	/* Normalize acc in Q31 */
-	sft = norm_l(L_sum);
-	L_sum = L_sum << sft;
-	*exp = 30 - sft;            /* exponent = 0..30 */
-	return (L_sum);
+    Word16 sft;
+    Word32 i, L_sum;
+    L_sum = 0;
+    for (i = 0; i < lg; i++)
+    {
+        Word32 tmp = (Word32) x[i] * (Word32) y[i];
+        if (tmp == (Word32) 0x40000000L) {
+            tmp = MAX_32;
+        }
+        L_sum = L_add(L_sum, tmp);
+    }
+    L_sum = L_shl2(L_sum, 1);
+    L_sum = L_add(L_sum, 1);
+    /* Normalize acc in Q31 */
+    sft = norm_l(L_sum);
+    L_sum = L_sum << sft;
+    *exp = 30 - sft;            /* exponent = 0..30 */
+    return (L_sum);
 
 }
 
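The Dot_product12 hunk above is a functional fix, not just reindentation: each doubled product is now clamped and the accumulation goes through the saturating L_add/L_shl2 helpers. The hazard is x[i] = y[i] = -32768: the 16x16 product is 0x40000000, and doubling it (the implicit x2 of L_mult-style fixed-point arithmetic) lands exactly on 2^31, one past what a Word32 can hold. A standalone demonstration with exact-width types, as a sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int16_t x = -32768, y = -32768;
    int32_t p = (int32_t)x * (int32_t)y;   /* 0x40000000: representable */

    printf("product = 0x%08x\n", (unsigned)p);
    /* p << 1 would be 2^31 = INT32_MAX + 1, and shifting into the sign
     * bit is undefined behaviour in C; hence the clamp to MAX_32
     * (0x7fffffff) before the saturating accumulate. */
    return 0;
}
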
diff --git a/media/libstagefright/codecs/amrwbenc/src/mem_align.c b/media/libstagefright/codecs/amrwbenc/src/mem_align.c
index 3b7853f..04e5976 100644
--- a/media/libstagefright/codecs/amrwbenc/src/mem_align.c
+++ b/media/libstagefright/codecs/amrwbenc/src/mem_align.c
@@ -15,18 +15,18 @@
  */
 
 /*******************************************************************************
-	File:		mem_align.c
+    File:       mem_align.c
 
-	Content:	Memory alloc alignments functions
+    Content:    Memory allocation alignment functions
 
 *******************************************************************************/
 
 
-#include	"mem_align.h"
+#include    "mem_align.h"
 #ifdef _MSC_VER
-#include	<stddef.h>
+#include    <stddef.h>
 #else
-#include	<stdint.h>
+#include    <stdint.h>
 #endif
 
 /*****************************************************************************
@@ -39,50 +39,50 @@
 void *
 mem_malloc(VO_MEM_OPERATOR *pMemop, unsigned int size, unsigned char alignment, unsigned int CodecID)
 {
-	int ret;
-	unsigned char *mem_ptr;
-	VO_MEM_INFO MemInfo;
+    int ret;
+    unsigned char *mem_ptr;
+    VO_MEM_INFO MemInfo;
 
-	if (!alignment) {
+    if (!alignment) {
 
-		MemInfo.Flag = 0;
-		MemInfo.Size = size + 1;
-		ret = pMemop->Alloc(CodecID, &MemInfo);
-		if(ret != 0)
-			return 0;
-		mem_ptr = (unsigned char *)MemInfo.VBuffer;
+        MemInfo.Flag = 0;
+        MemInfo.Size = size + 1;
+        ret = pMemop->Alloc(CodecID, &MemInfo);
+        if(ret != 0)
+            return 0;
+        mem_ptr = (unsigned char *)MemInfo.VBuffer;
 
-		pMemop->Set(CodecID, mem_ptr, 0, size + 1);
+        pMemop->Set(CodecID, mem_ptr, 0, size + 1);
 
-		*mem_ptr = (unsigned char)1;
+        *mem_ptr = (unsigned char)1;
 
-		return ((void *)(mem_ptr+1));
-	} else {
-		unsigned char *tmp;
+        return ((void *)(mem_ptr+1));
+    } else {
+        unsigned char *tmp;
 
-		MemInfo.Flag = 0;
-		MemInfo.Size = size + alignment;
-		ret = pMemop->Alloc(CodecID, &MemInfo);
-		if(ret != 0)
-			return 0;
+        MemInfo.Flag = 0;
+        MemInfo.Size = size + alignment;
+        ret = pMemop->Alloc(CodecID, &MemInfo);
+        if(ret != 0)
+            return 0;
 
-		tmp = (unsigned char *)MemInfo.VBuffer;
+        tmp = (unsigned char *)MemInfo.VBuffer;
 
-		pMemop->Set(CodecID, tmp, 0, size + alignment);
+        pMemop->Set(CodecID, tmp, 0, size + alignment);
 
-		mem_ptr =
-			(unsigned char *) ((intptr_t) (tmp + alignment - 1) &
-					(~((intptr_t) (alignment - 1))));
+        mem_ptr =
+            (unsigned char *) ((intptr_t) (tmp + alignment - 1) &
+                    (~((intptr_t) (alignment - 1))));
 
-		if (mem_ptr == tmp)
-			mem_ptr += alignment;
+        if (mem_ptr == tmp)
+            mem_ptr += alignment;
 
-		*(mem_ptr - 1) = (unsigned char) (mem_ptr - tmp);
+        *(mem_ptr - 1) = (unsigned char) (mem_ptr - tmp);
 
-		return ((void *)mem_ptr);
-	}
+        return ((void *)mem_ptr);
+    }
 
-	return(0);
+    return(0);
 }
 
 
@@ -96,16 +96,16 @@
 mem_free(VO_MEM_OPERATOR *pMemop, void *mem_ptr, unsigned int CodecID)
 {
 
-	unsigned char *ptr;
+    unsigned char *ptr;
 
-	if (mem_ptr == 0)
-		return;
+    if (mem_ptr == 0)
+        return;
 
-	ptr = mem_ptr;
+    ptr = mem_ptr;
 
-	ptr -= *(ptr - 1);
+    ptr -= *(ptr - 1);
 
-	pMemop->Free(CodecID, ptr);
+    pMemop->Free(CodecID, ptr);
 }
 
 
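mem_malloc over-allocates by `alignment`, rounds the pointer up to the requested boundary, and stashes the byte offset just in front of the returned pointer so mem_free can recover the original block; the `if (mem_ptr == tmp)` bump guarantees there is always at least one byte of headroom for that offset. The same trick on top of plain malloc/free, as a sketch (it assumes alignment is a nonzero power of two no larger than 128 so the offset fits in one byte, and it skips the alignment == 0 path):

#include <stdint.h>
#include <stdlib.h>

static void *aligned_malloc_sketch(size_t size, unsigned char alignment)
{
    unsigned char *raw = (unsigned char *)malloc(size + alignment);
    unsigned char *p;

    if (raw == NULL)
        return NULL;
    p = (unsigned char *)(((uintptr_t)raw + alignment - 1)
                          & ~(uintptr_t)(alignment - 1));
    if (p == raw)
        p += alignment;              /* keep room for the offset byte */
    p[-1] = (unsigned char)(p - raw);
    return p;
}

static void aligned_free_sketch(void *mem)
{
    unsigned char *p = (unsigned char *)mem;

    if (p != NULL)
        free(p - p[-1]);
}
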
diff --git a/media/libstagefright/codecs/amrwbenc/src/oper_32b.c b/media/libstagefright/codecs/amrwbenc/src/oper_32b.c
index 27cad76..e6f80d0 100644
--- a/media/libstagefright/codecs/amrwbenc/src/oper_32b.c
+++ b/media/libstagefright/codecs/amrwbenc/src/oper_32b.c
@@ -56,9 +56,9 @@
 
 __inline void VO_L_Extract (Word32 L_32, Word16 *hi, Word16 *lo)
 {
-	*hi = (Word16)(L_32 >> 16);
-	*lo = (Word16)((L_32 & 0xffff) >> 1);
-	return;
+    *hi = (Word16)(L_32 >> 16);
+    *lo = (Word16)((L_32 & 0xffff) >> 1);
+    return;
 }
 
 /*****************************************************************************
@@ -84,11 +84,11 @@
 
 Word32 L_Comp (Word16 hi, Word16 lo)
 {
-	Word32 L_32;
+    Word32 L_32;
 
-	L_32 = L_deposit_h (hi);
+    L_32 = L_deposit_h (hi);
 
-	return (L_mac (L_32, lo, 1));       /* = hi<<16 + lo<<1 */
+    return (L_mac (L_32, lo, 1));       /* = hi<<16 + lo<<1 */
 }
 
 /*****************************************************************************
@@ -113,13 +113,13 @@
 
 __inline Word32  Mpy_32 (Word16 hi1, Word16 lo1, Word16 hi2, Word16 lo2)
 {
-	Word32 L_32;
-	L_32 = (hi1 * hi2);
-	L_32 += (hi1 * lo2) >> 15;
-	L_32 += (lo1 * hi2) >> 15;
-	L_32 <<= 1;
+    Word32 L_32;
+    L_32 = (hi1 * hi2);
+    L_32 += (hi1 * lo2) >> 15;
+    L_32 += (lo1 * hi2) >> 15;
+    L_32 <<= 1;
 
-	return (L_32);
+    return (L_32);
 }
 
 /*****************************************************************************
@@ -142,12 +142,12 @@
 
 __inline Word32 Mpy_32_16 (Word16 hi, Word16 lo, Word16 n)
 {
-	Word32 L_32;
+    Word32 L_32;
 
-	L_32 = (hi * n)<<1;
-	L_32 += (((lo * n)>>15)<<1);
+    L_32 = (hi * n)<<1;
+    L_32 += (((lo * n)>>15)<<1);
 
-	return (L_32);
+    return (L_32);
 }
 
 /*****************************************************************************
@@ -194,30 +194,30 @@
 
 Word32 Div_32 (Word32 L_num, Word16 denom_hi, Word16 denom_lo)
 {
-	Word16 approx, hi, lo, n_hi, n_lo;
-	Word32 L_32;
+    Word16 approx, hi, lo, n_hi, n_lo;
+    Word32 L_32;
 
-	/* First approximation: 1 / L_denom = 1/denom_hi */
+    /* First approximation: 1 / L_denom = 1/denom_hi */
 
-	approx = div_s ((Word16) 0x3fff, denom_hi);
+    approx = div_s ((Word16) 0x3fff, denom_hi);
 
-	/* 1/L_denom = approx * (2.0 - L_denom * approx) */
+    /* 1/L_denom = approx * (2.0 - L_denom * approx) */
 
-	L_32 = Mpy_32_16 (denom_hi, denom_lo, approx);
+    L_32 = Mpy_32_16 (denom_hi, denom_lo, approx);
 
-	L_32 = L_sub ((Word32) 0x7fffffffL, L_32);
-	hi = L_32 >> 16;
-	lo = (L_32 & 0xffff) >> 1;
+    L_32 = L_sub ((Word32) 0x7fffffffL, L_32);
+    hi = L_32 >> 16;
+    lo = (L_32 & 0xffff) >> 1;
 
-	L_32 = Mpy_32_16 (hi, lo, approx);
+    L_32 = Mpy_32_16 (hi, lo, approx);
 
-	/* L_num * (1/L_denom) */
-	hi = L_32 >> 16;
-	lo = (L_32 & 0xffff) >> 1;
-	VO_L_Extract (L_num, &n_hi, &n_lo);
-	L_32 = Mpy_32 (n_hi, n_lo, hi, lo);
-	L_32 = L_shl2(L_32, 2);
+    /* L_num * (1/L_denom) */
+    hi = L_32 >> 16;
+    lo = (L_32 & 0xffff) >> 1;
+    VO_L_Extract (L_num, &n_hi, &n_lo);
+    L_32 = Mpy_32 (n_hi, n_lo, hi, lo);
+    L_32 = L_shl2(L_32, 2);
 
-	return (L_32);
+    return (L_32);
 }
 
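Every helper in oper_32b.c leans on one convention: a 32-bit value travels as hi = bits 16..31 and lo = bits 1..15, so L_32 is reassembled as (hi << 16) + (lo << 1) and only bit 0 is lost. Mpy_32 multiplies two such pairs by cross-multiplying the halves, and Div_32 starts from 1/denom_hi and applies one refinement of the reciprocal before multiplying by the numerator. A round-trip check of the split with exact-width types, as a sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int32_t v[] = { 0x12345678, -2, 0x7fffffff, INT32_MIN };
    int i;

    for (i = 0; i < 4; i++) {
        int16_t hi = (int16_t)(v[i] >> 16);
        int16_t lo = (int16_t)((v[i] & 0xffff) >> 1);
        int32_t back = (int32_t)hi * 65536 + (int32_t)lo * 2;

        assert(v[i] - back == (v[i] & 1));   /* only bit 0 is dropped */
    }
    return 0;
}
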
diff --git a/media/libstagefright/codecs/amrwbenc/src/p_med_ol.c b/media/libstagefright/codecs/amrwbenc/src/p_med_ol.c
index b8174b9..5d2b4bd 100644
--- a/media/libstagefright/codecs/amrwbenc/src/p_med_ol.c
+++ b/media/libstagefright/codecs/amrwbenc/src/p_med_ol.c
@@ -18,7 +18,7 @@
 *      File: p_med_ol.c                                                *
 *                                                                      *
 *      Description: Compute the open loop pitch lag                    *
-*	            output: open loop pitch lag                        *
+*                   output: open loop pitch lag                        *
 ************************************************************************/
 
 #include "typedef.h"
@@ -29,131 +29,131 @@
 #include "p_med_ol.tab"
 
 Word16 Pitch_med_ol(
-		   Word16      wsp[],        /*   i: signal used to compute the open loop pitch*/
+           Word16      wsp[],        /*   i: signal used to compute the open loop pitch*/
                                      /*      wsp[-pit_max] to wsp[-1] should be known */
-		   Coder_State *st,          /* i/o: codec global structure */
-		   Word16      L_frame       /*   i: length of frame to compute pitch */
-		)
+           Coder_State *st,          /* i/o: codec global structure */
+           Word16      L_frame       /*   i: length of frame to compute pitch */
+        )
 {
-	Word16 Tm;
-	Word16 hi, lo;
-	Word16 *ww, *we, *hp_wsp;
-	Word16 exp_R0, exp_R1, exp_R2;
-	Word32 i, j, max, R0, R1, R2;
-	Word16 *p1, *p2;
-	Word16 L_min = 17;                   /* minimum pitch lag: PIT_MIN / OPL_DECIM */
-	Word16 L_max = 115;                  /* maximum pitch lag: PIT_MAX / OPL_DECIM */
-	Word16 L_0 = st->old_T0_med;         /* old open-loop pitch */
-	Word16 *gain = &(st->ol_gain);       /* normalize correlation of hp_wsp for the lag */
-	Word16 *hp_wsp_mem = st->hp_wsp_mem; /* memory of the hypass filter for hp_wsp[] (lg = 9)*/
-	Word16 *old_hp_wsp = st->old_hp_wsp; /* hypass wsp[] */
-	Word16 wght_flg = st->ol_wght_flg;   /* is weighting function used */
+    Word16 Tm;
+    Word16 hi, lo;
+    Word16 *ww, *we, *hp_wsp;
+    Word16 exp_R0, exp_R1, exp_R2;
+    Word32 i, j, max, R0, R1, R2;
+    Word16 *p1, *p2;
+    Word16 L_min = 17;                   /* minimum pitch lag: PIT_MIN / OPL_DECIM */
+    Word16 L_max = 115;                  /* maximum pitch lag: PIT_MAX / OPL_DECIM */
+    Word16 L_0 = st->old_T0_med;         /* old open-loop pitch */
+    Word16 *gain = &(st->ol_gain);       /* normalized correlation of hp_wsp for the lag */
+    Word16 *hp_wsp_mem = st->hp_wsp_mem; /* memory of the highpass filter for hp_wsp[] (lg = 9) */
+    Word16 *old_hp_wsp = st->old_hp_wsp; /* highpass-filtered wsp[] */
+    Word16 wght_flg = st->ol_wght_flg;   /* is weighting function used */
 
-	ww = &corrweight[198];
-	we = &corrweight[98 + L_max - L_0];
+    ww = &corrweight[198];
+    we = &corrweight[98 + L_max - L_0];
 
-	max = MIN_32;
-	Tm = 0;
-	for (i = L_max; i > L_min; i--)
-	{
-		/* Compute the correlation */
-		R0 = 0;
-		p1 = wsp;
-		p2 = &wsp[-i];
-		for (j = 0; j < L_frame; j+=4)
-		{
-			R0 += vo_L_mult((*p1++), (*p2++));
-			R0 += vo_L_mult((*p1++), (*p2++));
-			R0 += vo_L_mult((*p1++), (*p2++));
-			R0 += vo_L_mult((*p1++), (*p2++));
-		}
-		/* Weighting of the correlation function.   */
-		hi = R0>>16;
-		lo = (R0 & 0xffff)>>1;
+    max = MIN_32;
+    Tm = 0;
+    for (i = L_max; i > L_min; i--)
+    {
+        /* Compute the correlation */
+        R0 = 0;
+        p1 = wsp;
+        p2 = &wsp[-i];
+        for (j = 0; j < L_frame; j+=4)
+        {
+            R0 += vo_L_mult((*p1++), (*p2++));
+            R0 += vo_L_mult((*p1++), (*p2++));
+            R0 += vo_L_mult((*p1++), (*p2++));
+            R0 += vo_L_mult((*p1++), (*p2++));
+        }
+        /* Weighting of the correlation function.   */
+        hi = R0>>16;
+        lo = (R0 & 0xffff)>>1;
 
-		R0 = Mpy_32_16(hi, lo, *ww);
-		ww--;
+        R0 = Mpy_32_16(hi, lo, *ww);
+        ww--;
 
-		if ((L_0 > 0) && (wght_flg > 0))
-		{
-			/* Weight the neighbourhood of the old lag. */
-			hi = R0>>16;
-			lo = (R0 & 0xffff)>>1;
-			R0 = Mpy_32_16(hi, lo, *we);
-			we--;
-		}
-		if(R0 >= max)
-		{
-			max = R0;
-			Tm = i;
-		}
-	}
+        if ((L_0 > 0) && (wght_flg > 0))
+        {
+            /* Weight the neighbourhood of the old lag. */
+            hi = R0>>16;
+            lo = (R0 & 0xffff)>>1;
+            R0 = Mpy_32_16(hi, lo, *we);
+            we--;
+        }
+        if(R0 >= max)
+        {
+            max = R0;
+            Tm = i;
+        }
+    }
 
-	/* Hypass the wsp[] vector */
-	hp_wsp = old_hp_wsp + L_max;
-	Hp_wsp(wsp, hp_wsp, L_frame, hp_wsp_mem);
+    /* Highpass-filter the wsp[] vector */
+    hp_wsp = old_hp_wsp + L_max;
+    Hp_wsp(wsp, hp_wsp, L_frame, hp_wsp_mem);
 
+    /* Compute normalized correlation at delay Tm */
-	R0 = 0;
-	R1 = 0;
-	R2 = 0;
-	p1 = hp_wsp;
-	p2 = hp_wsp - Tm;
-	for (j = 0; j < L_frame; j+=4)
-	{
-		R2 += vo_mult32(*p1, *p1);
-		R1 += vo_mult32(*p2, *p2);
-		R0 += vo_mult32(*p1++, *p2++);
-		R2 += vo_mult32(*p1, *p1);
-		R1 += vo_mult32(*p2, *p2);
-		R0 += vo_mult32(*p1++, *p2++);
-		R2 += vo_mult32(*p1, *p1);
-		R1 += vo_mult32(*p2, *p2);
-		R0 += vo_mult32(*p1++, *p2++);
-		R2 += vo_mult32(*p1, *p1);
-		R1 += vo_mult32(*p2, *p2);
-		R0 += vo_mult32(*p1++, *p2++);
-	}
-	R0 = R0 <<1;
-	R1 = (R1 <<1) + 1L;
-	R2 = (R2 <<1) + 1L;
-	/* gain = R0/ sqrt(R1*R2) */
+    /* Compute normalize correlation at delay Tm */
+    R0 = 0;
+    R1 = 0;
+    R2 = 0;
+    p1 = hp_wsp;
+    p2 = hp_wsp - Tm;
+    for (j = 0; j < L_frame; j+=4)
+    {
+        R2 += vo_mult32(*p1, *p1);
+        R1 += vo_mult32(*p2, *p2);
+        R0 += vo_mult32(*p1++, *p2++);
+        R2 += vo_mult32(*p1, *p1);
+        R1 += vo_mult32(*p2, *p2);
+        R0 += vo_mult32(*p1++, *p2++);
+        R2 += vo_mult32(*p1, *p1);
+        R1 += vo_mult32(*p2, *p2);
+        R0 += vo_mult32(*p1++, *p2++);
+        R2 += vo_mult32(*p1, *p1);
+        R1 += vo_mult32(*p2, *p2);
+        R0 += vo_mult32(*p1++, *p2++);
+    }
+    R0 = R0 <<1;
+    R1 = (R1 <<1) + 1L;
+    R2 = (R2 <<1) + 1L;
+    /* gain = R0 / sqrt(R1*R2) */
 
-	exp_R0 = norm_l(R0);
-	R0 = (R0 << exp_R0);
+    exp_R0 = norm_l(R0);
+    R0 = (R0 << exp_R0);
 
-	exp_R1 = norm_l(R1);
-	R1 = (R1 << exp_R1);
+    exp_R1 = norm_l(R1);
+    R1 = (R1 << exp_R1);
 
-	exp_R2 = norm_l(R2);
-	R2 = (R2 << exp_R2);
+    exp_R2 = norm_l(R2);
+    R2 = (R2 << exp_R2);
 
 
-	R1 = vo_L_mult(vo_round(R1), vo_round(R2));
+    R1 = vo_L_mult(voround(R1), voround(R2));
 
-	i = norm_l(R1);
-	R1 = (R1 << i);
+    i = norm_l(R1);
+    R1 = (R1 << i);
 
-	exp_R1 += exp_R2;
-	exp_R1 += i;
-	exp_R1 = 62 - exp_R1;
+    exp_R1 += exp_R2;
+    exp_R1 += i;
+    exp_R1 = 62 - exp_R1;
 
-	Isqrt_n(&R1, &exp_R1);
+    Isqrt_n(&R1, &exp_R1);
 
-	R0 = vo_L_mult(voround(R0), voround(R1));
-	exp_R0 = 31 - exp_R0;
-	exp_R0 += exp_R1;
+    R0 = vo_L_mult(voround(R0), voround(R1));
+    exp_R0 = 31 - exp_R0;
+    exp_R0 += exp_R1;
 
-	*gain = vo_round(L_shl(R0, exp_R0));
+    *gain = vo_round(L_shl(R0, exp_R0));
 
-	/* Shitf hp_wsp[] for next frame */
+    /* Shift hp_wsp[] for next frame */
 
-	for (i = 0; i < L_max; i++)
-	{
-		old_hp_wsp[i] = old_hp_wsp[i + L_frame];
-	}
+    for (i = 0; i < L_max; i++)
+    {
+        old_hp_wsp[i] = old_hp_wsp[i + L_frame];
+    }
 
-	return (Tm);
+    return (Tm);
 }
 
 /************************************************************************
@@ -171,84 +171,84 @@
 
 Word16 median5(Word16 x[])
 {
-	Word16 x1, x2, x3, x4, x5;
-	Word16 tmp;
+    Word16 x1, x2, x3, x4, x5;
+    Word16 tmp;
 
-	x1 = x[-2];
-	x2 = x[-1];
-	x3 = x[0];
-	x4 = x[1];
-	x5 = x[2];
+    x1 = x[-2];
+    x2 = x[-1];
+    x3 = x[0];
+    x4 = x[1];
+    x5 = x[2];
 
-	if (x2 < x1)
-	{
-		tmp = x1;
-		x1 = x2;
-		x2 = tmp;
-	}
-	if (x3 < x1)
-	{
-		tmp = x1;
-		x1 = x3;
-		x3 = tmp;
-	}
-	if (x4 < x1)
-	{
-		tmp = x1;
-		x1 = x4;
-		x4 = tmp;
-	}
-	if (x5 < x1)
-	{
-		x5 = x1;
-	}
-	if (x3 < x2)
-	{
-		tmp = x2;
-		x2 = x3;
-		x3 = tmp;
-	}
-	if (x4 < x2)
-	{
-		tmp = x2;
-		x2 = x4;
-		x4 = tmp;
-	}
-	if (x5 < x2)
-	{
-		x5 = x2;
-	}
-	if (x4 < x3)
-	{
-		x3 = x4;
-	}
-	if (x5 < x3)
-	{
-		x3 = x5;
-	}
-	return (x3);
+    if (x2 < x1)
+    {
+        tmp = x1;
+        x1 = x2;
+        x2 = tmp;
+    }
+    if (x3 < x1)
+    {
+        tmp = x1;
+        x1 = x3;
+        x3 = tmp;
+    }
+    if (x4 < x1)
+    {
+        tmp = x1;
+        x1 = x4;
+        x4 = tmp;
+    }
+    if (x5 < x1)
+    {
+        x5 = x1;
+    }
+    if (x3 < x2)
+    {
+        tmp = x2;
+        x2 = x3;
+        x3 = tmp;
+    }
+    if (x4 < x2)
+    {
+        tmp = x2;
+        x2 = x4;
+        x4 = tmp;
+    }
+    if (x5 < x2)
+    {
+        x5 = x2;
+    }
+    if (x4 < x3)
+    {
+        x3 = x4;
+    }
+    if (x5 < x3)
+    {
+        x3 = x5;
+    }
+    return (x3);
 }
 
 
 Word16 Med_olag(                           /* output : median of  5 previous open-loop lags       */
-		Word16 prev_ol_lag,                /* input  : previous open-loop lag                     */
-		Word16 old_ol_lag[5]
-	       )
+        Word16 prev_ol_lag,                /* input  : previous open-loop lag                     */
+        Word16 old_ol_lag[5]
+           )
 {
-	Word32 i;
+    Word32 i;
 
-	/* Use median of 5 previous open-loop lags as old lag */
+    /* Use median of 5 previous open-loop lags as old lag */
 
-	for (i = 4; i > 0; i--)
-	{
-		old_ol_lag[i] = old_ol_lag[i - 1];
-	}
+    for (i = 4; i > 0; i--)
+    {
+        old_ol_lag[i] = old_ol_lag[i - 1];
+    }
 
-	old_ol_lag[0] = prev_ol_lag;
+    old_ol_lag[0] = prev_ol_lag;
 
-	i = median5(&old_ol_lag[2]);
+    i = median5(&old_ol_lag[2]);
 
-	return i;
+    return i;
 
 }
 
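median5 extracts the middle of five lags with a fixed comparison network (just enough partial ordering to pin down x3), and Med_olag feeds it a sliding window of the last five open-loop lags to smooth outliers. A qsort-based reference for spot-checking it, as a sketch:

#include <stdint.h>
#include <stdlib.h>

static int cmp_int16(const void *a, const void *b)
{
    return (int)*(const int16_t *)a - (int)*(const int16_t *)b;
}

/* Like median5(), x points at the centre element; x[-2..2] are read. */
static int16_t median5_ref(const int16_t *x)
{
    int16_t v[5];
    int i;

    for (i = 0; i < 5; i++)
        v[i] = x[i - 2];
    qsort(v, 5, sizeof v[0], cmp_int16);
    return v[2];
}
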
diff --git a/media/libstagefright/codecs/amrwbenc/src/pit_shrp.c b/media/libstagefright/codecs/amrwbenc/src/pit_shrp.c
index 6f55b8f..f100253 100644
--- a/media/libstagefright/codecs/amrwbenc/src/pit_shrp.c
+++ b/media/libstagefright/codecs/amrwbenc/src/pit_shrp.c
@@ -25,24 +25,24 @@
 #include "basic_op.h"
 
 void Pit_shrp(
-		Word16 * x,                           /* in/out: impulse response (or algebraic code) */
-		Word16 pit_lag,                       /* input : pitch lag                            */
-		Word16 sharp,                         /* input : pitch sharpening factor (Q15)        */
-		Word16 L_subfr                        /* input : subframe size                        */
-	     )
+        Word16 * x,                           /* in/out: impulse response (or algebraic code) */
+        Word16 pit_lag,                       /* input : pitch lag                            */
+        Word16 sharp,                         /* input : pitch sharpening factor (Q15)        */
+        Word16 L_subfr                        /* input : subframe size                        */
+         )
 {
-	Word32 i;
-	Word32 L_tmp;
-	Word16 *x_ptr = x + pit_lag;
+    Word32 i;
+    Word32 L_tmp;
+    Word16 *x_ptr = x + pit_lag;
 
-	for (i = pit_lag; i < L_subfr; i++)
-	{
-		L_tmp = (*x_ptr << 15);
-		L_tmp += *x++ * sharp;
-		*x_ptr++ = ((L_tmp + 0x4000)>>15);
-	}
+    for (i = pit_lag; i < L_subfr; i++)
+    {
+        L_tmp = (*x_ptr << 15);
+        L_tmp += *x++ * sharp;
+        *x_ptr++ = ((L_tmp + 0x4000)>>15);
+    }
 
-	return;
+    return;
 }
 
 
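Pit_shrp is a one-tap comb on the code vector: every sample at least one pitch lag in gains `sharp` (Q15) times the sample one lag earlier, rounded back to 16 bits. Because it runs in place, short lags feed already-sharpened samples forward. A float sketch of the same recursion, without the Q15 rounding:

/* x[i] += sharp * x[i - pit_lag] for i = pit_lag .. L_subfr - 1. */
static void pit_shrp_ref(float *x, int pit_lag, float sharp, int L_subfr)
{
    int i;

    for (i = pit_lag; i < L_subfr; i++)
        x[i] += sharp * x[i - pit_lag];
}
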
diff --git a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
index b66b55e..b453b25 100644
--- a/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
+++ b/media/libstagefright/codecs/amrwbenc/src/pitch_f4.c
@@ -18,7 +18,7 @@
 *      File: pitch_f4.c                                                *
 *                                                                      *
 *      Description: Find the closed loop pitch period with             *
-*	            1/4 subsample resolution.                          *
+*                   1/4 subsample resolution.                          *
 *                                                                      *
 ************************************************************************/
 
@@ -37,117 +37,117 @@
 
 #ifdef ASM_OPT
 void Norm_corr_asm(
-		Word16 exc[],                         /* (i)     : excitation buffer                     */
-		Word16 xn[],                          /* (i)     : target vector                         */
-		Word16 h[],                           /* (i) Q15 : impulse response of synth/wgt filters */
-		Word16 L_subfr,
-		Word16 t_min,                         /* (i)     : minimum value of pitch lag.           */
-		Word16 t_max,                         /* (i)     : maximum value of pitch lag.           */
-		Word16 corr_norm[]                    /* (o) Q15 : normalized correlation                */
-		);
+        Word16 exc[],                         /* (i)     : excitation buffer                     */
+        Word16 xn[],                          /* (i)     : target vector                         */
+        Word16 h[],                           /* (i) Q15 : impulse response of synth/wgt filters */
+        Word16 L_subfr,
+        Word16 t_min,                         /* (i)     : minimum value of pitch lag.           */
+        Word16 t_max,                         /* (i)     : maximum value of pitch lag.           */
+        Word16 corr_norm[]                    /* (o) Q15 : normalized correlation                */
+        );
 #else
 static void Norm_Corr(
-		Word16 exc[],                         /* (i)     : excitation buffer                     */
-		Word16 xn[],                          /* (i)     : target vector                         */
-		Word16 h[],                           /* (i) Q15 : impulse response of synth/wgt filters */
-		Word16 L_subfr,
-		Word16 t_min,                         /* (i)     : minimum value of pitch lag.           */
-		Word16 t_max,                         /* (i)     : maximum value of pitch lag.           */
-		Word16 corr_norm[]                    /* (o) Q15 : normalized correlation                */
-		);
+        Word16 exc[],                         /* (i)     : excitation buffer                     */
+        Word16 xn[],                          /* (i)     : target vector                         */
+        Word16 h[],                           /* (i) Q15 : impulse response of synth/wgt filters */
+        Word16 L_subfr,
+        Word16 t_min,                         /* (i)     : minimum value of pitch lag.           */
+        Word16 t_max,                         /* (i)     : maximum value of pitch lag.           */
+        Word16 corr_norm[]                    /* (o) Q15 : normalized correlation                */
+        );
 #endif
 
 static Word16 Interpol_4(                  /* (o)  : interpolated value  */
-		Word16 * x,                           /* (i)  : input vector        */
-		Word32 frac                           /* (i)  : fraction (-4..+3)   */
-		);
+        Word16 * x,                           /* (i)  : input vector        */
+        Word32 frac                           /* (i)  : fraction (-4..+3)   */
+        );
 
 
 Word16 Pitch_fr4(                          /* (o)     : pitch period.                         */
-		Word16 exc[],                         /* (i)     : excitation buffer                     */
-		Word16 xn[],                          /* (i)     : target vector                         */
-		Word16 h[],                           /* (i) Q15 : impulse response of synth/wgt filters */
-		Word16 t0_min,                        /* (i)     : minimum value in the searched range.  */
-		Word16 t0_max,                        /* (i)     : maximum value in the searched range.  */
-		Word16 * pit_frac,                    /* (o)     : chosen fraction (0, 1, 2 or 3).       */
-		Word16 i_subfr,                       /* (i)     : indicator for first subframe.         */
-		Word16 t0_fr2,                        /* (i)     : minimum value for resolution 1/2      */
-		Word16 t0_fr1,                        /* (i)     : minimum value for resolution 1        */
-		Word16 L_subfr                        /* (i)     : Length of subframe                    */
-		)
+        Word16 exc[],                         /* (i)     : excitation buffer                     */
+        Word16 xn[],                          /* (i)     : target vector                         */
+        Word16 h[],                           /* (i) Q15 : impulse response of synth/wgt filters */
+        Word16 t0_min,                        /* (i)     : minimum value in the searched range.  */
+        Word16 t0_max,                        /* (i)     : maximum value in the searched range.  */
+        Word16 * pit_frac,                    /* (o)     : chosen fraction (0, 1, 2 or 3).       */
+        Word16 i_subfr,                       /* (i)     : indicator for first subframe.         */
+        Word16 t0_fr2,                        /* (i)     : minimum value for resolution 1/2      */
+        Word16 t0_fr1,                        /* (i)     : minimum value for resolution 1        */
+        Word16 L_subfr                        /* (i)     : Length of subframe                    */
+        )
 {
-	Word32 fraction, i;
-	Word16 t_min, t_max;
-	Word16 max, t0, step, temp;
-	Word16 *corr;
-	Word16 corr_v[40];                     /* Total length = t0_max-t0_min+1+2*L_inter */
+    Word32 fraction, i;
+    Word16 t_min, t_max;
+    Word16 max, t0, step, temp;
+    Word16 *corr;
+    Word16 corr_v[40];                     /* Total length = t0_max-t0_min+1+2*L_inter */
 
-	/* Find interval to compute normalized correlation */
+    /* Find interval to compute normalized correlation */
 
-	t_min = t0_min - L_INTERPOL1;
-	t_max = t0_max + L_INTERPOL1;
-	corr = &corr_v[-t_min];
-	/* Compute normalized correlation between target and filtered excitation */
+    t_min = L_sub(t0_min, L_INTERPOL1);
+    t_max = L_add(t0_max, L_INTERPOL1);
+    corr = &corr_v[-t_min];
+    /* Compute normalized correlation between target and filtered excitation */
 #ifdef ASM_OPT               /* asm optimization branch */
     Norm_corr_asm(exc, xn, h, L_subfr, t_min, t_max, corr);
 #else
-	Norm_Corr(exc, xn, h, L_subfr, t_min, t_max, corr);
+    Norm_Corr(exc, xn, h, L_subfr, t_min, t_max, corr);
 #endif
 
-	/* Find integer pitch */
+    /* Find integer pitch */
 
-	max = corr[t0_min];
-	t0 = t0_min;
-	for (i = t0_min + 1; i <= t0_max; i++)
-	{
-		if (corr[i] >= max)
-		{
-			max = corr[i];
-			t0 = i;
-		}
-	}
-	/* If first subframe and t0 >= t0_fr1, do not search fractionnal pitch */
-	if ((i_subfr == 0) && (t0 >= t0_fr1))
-	{
-		*pit_frac = 0;
-		return (t0);
-	}
-	/*------------------------------------------------------------------*
-	 * Search fractionnal pitch with 1/4 subsample resolution.          *
-	 * Test the fractions around t0 and choose the one which maximizes  *
-	 * the interpolated normalized correlation.                         *
-	 *------------------------------------------------------------------*/
+    max = corr[t0_min];
+    t0 = t0_min;
+    for (i = t0_min + 1; i <= t0_max; i++)
+    {
+        if (corr[i] >= max)
+        {
+            max = corr[i];
+            t0 = i;
+        }
+    }
+    /* If first subframe and t0 >= t0_fr1, do not search fractional pitch */
+    if ((i_subfr == 0) && (t0 >= t0_fr1))
+    {
+        *pit_frac = 0;
+        return (t0);
+    }
+    /*------------------------------------------------------------------*
+     * Search fractional pitch with 1/4 subsample resolution.           *
+     * Test the fractions around t0 and choose the one which maximizes  *
+     * the interpolated normalized correlation.                         *
+     *------------------------------------------------------------------*/
 
-	step = 1;               /* 1/4 subsample resolution */
-	fraction = -3;
-	if ((t0_fr2 == PIT_MIN)||((i_subfr == 0) && (t0 >= t0_fr2)))
-	{
-		step = 2;              /* 1/2 subsample resolution */
-		fraction = -2;
-	}
-	if(t0 == t0_min)
-	{
-		fraction = 0;
-	}
-	max = Interpol_4(&corr[t0], fraction);
+    step = 1;               /* 1/4 subsample resolution */
+    fraction = -3;
+    if ((t0_fr2 == PIT_MIN)||((i_subfr == 0) && (t0 >= t0_fr2)))
+    {
+        step = 2;              /* 1/2 subsample resolution */
+        fraction = -2;
+    }
+    if(t0 == t0_min)
+    {
+        fraction = 0;
+    }
+    max = Interpol_4(&corr[t0], fraction);
 
-	for (i = fraction + step; i <= 3; i += step)
-	{
-		temp = Interpol_4(&corr[t0], i);
-		if(temp > max)
-		{
-			max = temp;
-			fraction = i;
-		}
-	}
-	/* limit the fraction value in the interval [0,1,2,3] */
-	if (fraction < 0)
-	{
-		fraction += UP_SAMP;
-		t0 -= 1;
-	}
-	*pit_frac = fraction;
-	return (t0);
+    for (i = fraction + step; i <= 3; i += step)
+    {
+        temp = Interpol_4(&corr[t0], i);
+        if(temp > max)
+        {
+            max = temp;
+            fraction = i;
+        }
+    }
+    /* limit the fraction value in the interval [0,1,2,3] */
+    if (fraction < 0)
+    {
+        fraction += UP_SAMP;
+        t0 -= 1;
+    }
+    *pit_frac = fraction;
+    return (t0);
 }
 
 
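After the integer search, Pitch_fr4 probes fractional offsets of the interpolated correlation (quarter samples, or halves when `step` is 2) and then folds a negative winning fraction into the lag/fraction form the rest of the coder expects, with the fraction in {0, 1, 2, 3}. A tiny demonstration of that final wrap; the lag value is made up for the sketch:

#include <stdio.h>

#define UP_SAMP 4   /* quarter-sample resolution */

int main(void)
{
    int t0 = 57, fraction = -1;     /* best point found at 57 - 1/4 */

    if (fraction < 0) {
        fraction += UP_SAMP;
        t0 -= 1;
    }
    printf("pitch = %d + %d/4\n", t0, fraction);   /* 56 + 3/4 */
    return 0;
}
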
@@ -161,109 +161,109 @@
 ************************************************************************************/
 #ifndef ASM_OPT
 static void Norm_Corr(
-		Word16 exc[],                         /* (i)     : excitation buffer                     */
-		Word16 xn[],                          /* (i)     : target vector                         */
-		Word16 h[],                           /* (i) Q15 : impulse response of synth/wgt filters */
-		Word16 L_subfr,
-		Word16 t_min,                         /* (i)     : minimum value of pitch lag.           */
-		Word16 t_max,                         /* (i)     : maximum value of pitch lag.           */
-		Word16 corr_norm[])                   /* (o) Q15 : normalized correlation                */
+        Word16 exc[],                         /* (i)     : excitation buffer                     */
+        Word16 xn[],                          /* (i)     : target vector                         */
+        Word16 h[],                           /* (i) Q15 : impulse response of synth/wgt filters */
+        Word16 L_subfr,
+        Word16 t_min,                         /* (i)     : minimum value of pitch lag.           */
+        Word16 t_max,                         /* (i)     : maximum value of pitch lag.           */
+        Word16 corr_norm[])                   /* (o) Q15 : normalized correlation                */
 {
-	Word32 i, k, t;
-	Word32 corr, exp_corr, norm, exp, scale;
-	Word16 exp_norm, excf[L_SUBFR], tmp;
-	Word32 L_tmp, L_tmp1, L_tmp2;
+    Word32 i, k, t;
+    Word32 corr, exp_corr, norm, exp, scale;
+    Word16 exp_norm, excf[L_SUBFR], tmp;
+    Word32 L_tmp, L_tmp1, L_tmp2;
         UNUSED(L_subfr);
 
-	/* compute the filtered excitation for the first delay t_min */
-	k = -t_min;
+    /* compute the filtered excitation for the first delay t_min */
+    k = -t_min;
 
 #ifdef ASM_OPT              /* asm optimization branch */
-	Convolve_asm(&exc[k], h, excf, 64);
+    Convolve_asm(&exc[k], h, excf, 64);
 #else
-	Convolve(&exc[k], h, excf, 64);
+    Convolve(&exc[k], h, excf, 64);
 #endif
 
-	/* Compute rounded down 1/sqrt(energy of xn[]) */
-	L_tmp = 0;
-	for (i = 0; i < 64; i+=4)
-	{
-		L_tmp += (xn[i] * xn[i]);
-		L_tmp += (xn[i+1] * xn[i+1]);
-		L_tmp += (xn[i+2] * xn[i+2]);
-		L_tmp += (xn[i+3] * xn[i+3]);
-	}
+    /* Compute rounded down 1/sqrt(energy of xn[]) */
+    L_tmp = 0;
+    for (i = 0; i < 64; i+=4)
+    {
+        L_tmp = L_add(L_tmp, (xn[i] * xn[i]));
+        L_tmp = L_add(L_tmp, (xn[i+1] * xn[i+1]));
+        L_tmp = L_add(L_tmp, (xn[i+2] * xn[i+2]));
+        L_tmp = L_add(L_tmp, (xn[i+3] * xn[i+3]));
+    }
 
-	L_tmp = (L_tmp << 1) + 1;
-	exp = norm_l(L_tmp);
-	exp = (32 - exp);
-	//exp = exp + 2;                     /* energy of xn[] x 2 + rounded up     */
-	scale = -(exp >> 1);           /* (1<<scale) < 1/sqrt(energy rounded) */
+    L_tmp = L_add(L_shl(L_tmp, 1), 1);
+    exp = norm_l(L_tmp);
+    exp = L_sub(32, exp);
+    //exp = exp + 2;                     /* energy of xn[] x 2 + rounded up     */
+    scale = -(exp >> 1);           /* (1<<scale) < 1/sqrt(energy rounded) */
 
-	/* loop for every possible period */
+    /* loop for every possible period */
 
-	for (t = t_min; t <= t_max; t++)
-	{
-		/* Compute correlation between xn[] and excf[] */
-		L_tmp  = 0;
-		L_tmp1 = 0;
-		for (i = 0; i < 64; i+=4)
-		{
-			L_tmp  += (xn[i] * excf[i]);
-			L_tmp1 += (excf[i] * excf[i]);
-			L_tmp  += (xn[i+1] * excf[i+1]);
-			L_tmp1 += (excf[i+1] * excf[i+1]);
-			L_tmp  += (xn[i+2] * excf[i+2]);
-			L_tmp1 += (excf[i+2] * excf[i+2]);
-			L_tmp  += (xn[i+3] * excf[i+3]);
-			L_tmp1 += (excf[i+3] * excf[i+3]);
-		}
+    for (t = t_min; t <= t_max; t++)
+    {
+        /* Compute correlation between xn[] and excf[] */
+        L_tmp  = 0;
+        L_tmp1 = 0;
+        for (i = 0; i < 64; i+=4)
+        {
+            L_tmp = L_add(L_tmp, (xn[i] * excf[i]));
+            L_tmp1 = L_add(L_tmp1, (excf[i] * excf[i]));
+            L_tmp = L_add(L_tmp, (xn[i+1] * excf[i+1]));
+            L_tmp1 = L_add(L_tmp1, (excf[i+1] * excf[i+1]));
+            L_tmp = L_add(L_tmp, (xn[i+2] * excf[i+2]));
+            L_tmp1 = L_add(L_tmp1, (excf[i+2] * excf[i+2]));
+            L_tmp = L_add(L_tmp, (xn[i+3] * excf[i+3]));
+            L_tmp1 = L_add(L_tmp1, (excf[i+3] * excf[i+3]));
+        }
 
-		L_tmp = (L_tmp << 1) + 1;
-		L_tmp1 = (L_tmp1 << 1) + 1;
+        L_tmp = L_add(L_shl(L_tmp, 1), 1);
+        L_tmp1 = L_add(L_shl(L_tmp1, 1), 1);
 
-		exp = norm_l(L_tmp);
-		L_tmp = (L_tmp << exp);
-		exp_corr = (30 - exp);
-		corr = extract_h(L_tmp);
+        exp = norm_l(L_tmp);
+        L_tmp = L_shl(L_tmp, exp);
+        exp_corr = L_sub(30, exp);
+        corr = extract_h(L_tmp);
 
-		exp = norm_l(L_tmp1);
-		L_tmp = (L_tmp1 << exp);
-		exp_norm = (30 - exp);
+        exp = norm_l(L_tmp1);
+        L_tmp = L_shl(L_tmp1, exp);
+        exp_norm = L_sub(30, exp);
 
-		Isqrt_n(&L_tmp, &exp_norm);
-		norm = extract_h(L_tmp);
+        Isqrt_n(&L_tmp, &exp_norm);
+        norm = extract_h(L_tmp);
 
-		/* Normalize correlation = correlation * (1/sqrt(energy)) */
+        /* Normalize correlation = correlation * (1/sqrt(energy)) */
 
-		L_tmp = vo_L_mult(corr, norm);
+        L_tmp = L_mult(corr, norm);
 
-		L_tmp2 = exp_corr + exp_norm + scale;
-		if(L_tmp2 < 0)
-		{
-			L_tmp2 = -L_tmp2;
-			L_tmp = L_tmp >> L_tmp2;
-		}
-		else
-		{
-			L_tmp = L_tmp << L_tmp2;
-		}
+        L_tmp2 = L_add(exp_corr, exp_norm + scale);
+        if(L_tmp2 < 0)
+        {
+            L_tmp2 = -L_tmp2;
+            L_tmp = L_tmp >> L_tmp2;
+        }
+        else
+        {
+            L_tmp = L_shl(L_tmp, L_tmp2);
+        }
 
-		corr_norm[t] = vo_round(L_tmp);
-		/* modify the filtered excitation excf[] for the next iteration */
+        corr_norm[t] = voround(L_tmp);
+        /* modify the filtered excitation excf[] for the next iteration */
 
-		if(t != t_max)
-		{
-			k = -(t + 1);
-			tmp = exc[k];
-			for (i = 63; i > 0; i--)
-			{
-				excf[i] = add1(vo_mult(tmp, h[i]), excf[i - 1]);
-			}
-			excf[0] = vo_mult(tmp, h[0]);
-		}
-	}
-	return;
+        if(t != t_max)
+        {
+            k = -(t + 1);
+            tmp = exc[k];
+            for (i = 63; i > 0; i--)
+            {
+                excf[i] = add1(vo_mult(tmp, h[i]), excf[i - 1]);
+            }
+            excf[0] = vo_mult(tmp, h[0]);
+        }
+    }
+    return;
 }
 
 #endif
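
Beyond the tab-to-space cleanup, the substantive change in Norm_Corr is that
the raw +=/<< accumulations become the saturating basic_op.h primitives
(L_add, L_shl), so a 32-bit sum clamps at the extremes instead of triggering
signed-overflow undefined behaviour. A minimal sketch of the saturating-add
idea, assuming only <stdint.h> (the codec's own L_add is the authoritative
version):

    #include <stdint.h>

    /* Saturating 32-bit add: widen to 64 bits, then clamp to the int32 range
     * instead of wrapping (signed wrap-around is undefined behaviour in C). */
    static int32_t sat_add32(int32_t a, int32_t b)
    {
        int64_t sum = (int64_t)a + (int64_t)b;
        if (sum > INT32_MAX) return INT32_MAX;
        if (sum < INT32_MIN) return INT32_MIN;
        return (int32_t)sum;
    }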
@@ -276,10 +276,10 @@
 /* 1/4 resolution interpolation filter (-3 dB at 0.791*fs/2) in Q14 */
 static Word16 inter4_1[4][8] =
 {
-	{-12, 420, -1732, 5429, 13418, -1242, 73, 32},
-	{-26, 455, -2142, 9910, 9910,  -2142, 455, -26},
-	{32,  73, -1242, 13418, 5429, -1732, 420, -12},
-	{206, -766, 1376, 14746, 1376, -766, 206, 0}
+    {-12, 420, -1732, 5429, 13418, -1242, 73, 32},
+    {-26, 455, -2142, 9910, 9910,  -2142, 455, -26},
+    {32,  73, -1242, 13418, 5429, -1732, 420, -12},
+    {206, -766, 1376, 14746, 1376, -766, 206, 0}
 };
 
 /*** Coefficients in floating point
@@ -292,34 +292,34 @@
 ***/
 
 static Word16 Interpol_4(                  /* (o)  : interpolated value  */
-		Word16 * x,                           /* (i)  : input vector        */
-		Word32 frac                           /* (i)  : fraction (-4..+3)   */
-		)
+        Word16 * x,                           /* (i)  : input vector        */
+        Word32 frac                           /* (i)  : fraction (-4..+3)   */
+        )
 {
-	Word16 sum;
-	Word32  k, L_sum;
-	Word16 *ptr;
+    Word16 sum;
+    Word32  k, L_sum;
+    Word16 *ptr;
 
-	if (frac < 0)
-	{
-		frac += UP_SAMP;
-		x--;
-	}
-	x = x - L_INTERPOL1 + 1;
-	k = UP_SAMP - 1 - frac;
-	ptr = &(inter4_1[k][0]);
+    if (frac < 0)
+    {
+        frac += UP_SAMP;
+        x--;
+    }
+    x = x - L_INTERPOL1 + 1;
+    k = UP_SAMP - 1 - frac;
+    ptr = &(inter4_1[k][0]);
 
-	L_sum  = vo_mult32(x[0], (*ptr++));
-	L_sum += vo_mult32(x[1], (*ptr++));
-	L_sum += vo_mult32(x[2], (*ptr++));
-	L_sum += vo_mult32(x[3], (*ptr++));
-	L_sum += vo_mult32(x[4], (*ptr++));
-	L_sum += vo_mult32(x[5], (*ptr++));
-	L_sum += vo_mult32(x[6], (*ptr++));
-	L_sum += vo_mult32(x[7], (*ptr++));
+    L_sum  = vo_mult32(x[0], (*ptr++));
+    L_sum = L_add(L_sum, vo_mult32(x[1], (*ptr++)));
+    L_sum = L_add(L_sum, vo_mult32(x[2], (*ptr++)));
+    L_sum = L_add(L_sum, vo_mult32(x[3], (*ptr++)));
+    L_sum = L_add(L_sum, vo_mult32(x[4], (*ptr++)));
+    L_sum = L_add(L_sum, vo_mult32(x[5], (*ptr++)));
+    L_sum = L_add(L_sum, vo_mult32(x[6], (*ptr++)));
+    L_sum = L_add(L_sum, vo_mult32(x[7], (*ptr++)));
 
-	sum = extract_h(L_add(L_shl2(L_sum, 2), 0x8000));
-	return (sum);
+    sum = extract_h(L_add(L_shl2(L_sum, 2), 0x8000));
+    return (sum);
 }
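
Interpol_4 is an 8-tap polyphase FIR: k = UP_SAMP - 1 - frac selects one of
the four phases, and the Q14 rows of inter4_1 are the floating-point
coefficients quoted above scaled by 2^14. A floating-point reference of the
same dot product (a sketch, not the codec's fixed-point path; coef is assumed
to hold inter4_1 divided by 16384.0):

    static double interpol4_ref(const double *x, int frac,
                                const double coef[4][8])
    {
        if (frac < 0) { frac += 4; x--; }   /* UP_SAMP == 4 */
        x -= 3;                             /* L_INTERPOL1 - 1 == 3 */
        const double *c = coef[3 - frac];   /* k = UP_SAMP - 1 - frac */
        double sum = 0.0;
        for (int i = 0; i < 8; i++)
            sum += x[i] * c[i];             /* 8-tap dot product */
        return sum;
    }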
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/pred_lt4.c b/media/libstagefright/codecs/amrwbenc/src/pred_lt4.c
index 8404cf9..386cab3 100644
--- a/media/libstagefright/codecs/amrwbenc/src/pred_lt4.c
+++ b/media/libstagefright/codecs/amrwbenc/src/pred_lt4.c
@@ -34,86 +34,86 @@
 
 Word16 inter4_2[4][32] =
 {
-	{0,-2,4,-2,-10,38,-88,165,-275,424,-619,871,-1207,1699,-2598,5531,14031,-2147,780,-249,
-	-16,153,-213,226,-209,175,-133,91,-55,28,-10,2},
+    {0,-2,4,-2,-10,38,-88,165,-275,424,-619,871,-1207,1699,-2598,5531,14031,-2147,780,-249,
+    -16,153,-213,226,-209,175,-133,91,-55,28,-10,2},
 
-	{1,-7,19,-33,47,-52,43,-9,-60,175,-355,626,-1044,1749,-3267,10359,10359,-3267,1749,-1044,
-	626,-355,175,-60,-9,43,-52,47,-33,19, -7, 1},
+    {1,-7,19,-33,47,-52,43,-9,-60,175,-355,626,-1044,1749,-3267,10359,10359,-3267,1749,-1044,
+    626,-355,175,-60,-9,43,-52,47,-33,19, -7, 1},
 
-	{2,-10,28,-55,91,-133,175,-209,226,-213,153,-16,-249,780,-2147,14031,5531,-2598,1699,-1207,
-	871,-619,424,-275,165,-88,38,-10,-2,4,-2,0},
+    {2,-10,28,-55,91,-133,175,-209,226,-213,153,-16,-249,780,-2147,14031,5531,-2598,1699,-1207,
+    871,-619,424,-275,165,-88,38,-10,-2,4,-2,0},
 
-	{1,-7,22,-49,92,-153,231,-325,431,-544,656,-762,853,-923,968,15401,968,-923,853,-762,
-	656,-544,431,-325,231,-153,92,-49,22,-7, 1, 0}
+    {1,-7,22,-49,92,-153,231,-325,431,-544,656,-762,853,-923,968,15401,968,-923,853,-762,
+    656,-544,431,-325,231,-153,92,-49,22,-7, 1, 0}
 
 };
 
 void Pred_lt4(
-		Word16 exc[],                         /* in/out: excitation buffer */
-		Word16 T0,                            /* input : integer pitch lag */
-		Word16 frac,                          /* input : fraction of lag   */
-		Word16 L_subfr                        /* input : subframe size     */
-	     )
+        Word16 exc[],                         /* in/out: excitation buffer */
+        Word16 T0,                            /* input : integer pitch lag */
+        Word16 frac,                          /* input : fraction of lag   */
+        Word16 L_subfr                        /* input : subframe size     */
+         )
 {
-	Word16 j, k, *x;
-	Word32 L_sum;
-	Word16 *ptr, *ptr1;
-	Word16 *ptr2;
+    Word16 j, k, *x;
+    Word32 L_sum;
+    Word16 *ptr, *ptr1;
+    Word16 *ptr2;
 
-	x = exc - T0;
-	frac = -frac;
-	if (frac < 0)
-	{
-		frac += UP_SAMP;
-		x--;
-	}
-	x -= 15;                                     /* x = L_INTERPOL2 - 1 */
-	k = 3 - frac;                                /* k = UP_SAMP - 1 - frac */
+    x = exc - T0;
+    frac = -frac;
+    if (frac < 0)
+    {
+        frac += UP_SAMP;
+        x--;
+    }
+    x -= 15;                                     /* x = L_INTERPOL2 - 1 */
+    k = 3 - frac;                                /* k = UP_SAMP - 1 - frac */
 
-	ptr2 = &(inter4_2[k][0]);
-	for (j = 0; j < L_subfr; j++)
-	{
-		ptr = ptr2;
-		ptr1 = x;
-		L_sum  = vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
-		L_sum += vo_mult32((*ptr1++), (*ptr++));
+    ptr2 = &(inter4_2[k][0]);
+    for (j = 0; j < L_subfr; j++)
+    {
+        ptr = ptr2;
+        ptr1 = x;
+        L_sum  = vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
+        L_sum += vo_mult32((*ptr1++), (*ptr++));
 
-		L_sum = L_shl2(L_sum, 2);
-		exc[j] = extract_h(L_add(L_sum, 0x8000));
-		x++;
-	}
+        L_sum = L_shl2(L_sum, 2);
+        exc[j] = extract_h(L_add(L_sum, 0x8000));
+        x++;
+    }
 
-	return;
+    return;
 }
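
The 32 unrolled multiply-accumulate lines in Pred_lt4 compute, for each output
sample, a dot product of the past excitation with one 32-tap phase of
inter4_2. The same computation in loop form, as a floating-point sketch (coef
assumed to hold inter4_2 divided by 16384.0):

    static void pred_lt4_ref(double exc[], int T0, int frac, int L_subfr,
                             const double coef[4][32])
    {
        double *x = exc - T0;               /* caller provides past excitation */
        frac = -frac;
        if (frac < 0) { frac += 4; x--; }   /* UP_SAMP == 4 */
        x -= 15;                            /* L_INTERPOL2 - 1 */
        const double *c = coef[3 - frac];
        for (int j = 0; j < L_subfr; j++, x++) {
            double sum = 0.0;
            for (int k = 0; k < 32; k++)
                sum += x[k] * c[k];
            exc[j] = sum;                   /* interpolated past excitation */
        }
    }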
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/preemph.c b/media/libstagefright/codecs/amrwbenc/src/preemph.c
index c867bf7..70c8650 100644
--- a/media/libstagefright/codecs/amrwbenc/src/preemph.c
+++ b/media/libstagefright/codecs/amrwbenc/src/preemph.c
@@ -18,7 +18,7 @@
 *      File: preemph.c                                                *
 *                                                                     *
 *      Description: Preemphasis: filtering through 1 - g z^-1         *
-*	           Preemph2 --> signal is multiplied by 2             *
+*              Preemph2 --> signal is multiplied by 2                 *
 *                                                                     *
 ************************************************************************/
 
@@ -26,62 +26,74 @@
 #include "basic_op.h"
 
 void Preemph(
-		Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
-		Word16 mu,                            /* (i) Q15 : preemphasis coefficient                */
-		Word16 lg,                            /* (i)     : lenght of filtering                    */
-		Word16 * mem                          /* (i/o)   : memory (x[-1])                         */
-	    )
+        Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
+        Word16 mu,                            /* (i) Q15 : preemphasis coefficient                */
+        Word16 lg,                            /* (i)     : length of filtering                    */
+        Word16 * mem                          /* (i/o)   : memory (x[-1])                         */
+        )
 {
-	Word16 temp;
-	Word32 i, L_tmp;
+    Word16 temp;
+    Word32 i, L_tmp;
 
-	temp = x[lg - 1];
+    temp = x[lg - 1];
 
-	for (i = lg - 1; i > 0; i--)
-	{
-		L_tmp = L_deposit_h(x[i]);
-		L_tmp -= (x[i - 1] * mu)<<1;
-		x[i] = (L_tmp + 0x8000)>>16;
-	}
+    for (i = lg - 1; i > 0; i--)
+    {
+        L_tmp = L_deposit_h(x[i]);
+        L_tmp -= (x[i - 1] * mu)<<1;
+        x[i] = (L_tmp + 0x8000)>>16;
+    }
 
-	L_tmp = L_deposit_h(x[0]);
-	L_tmp -= ((*mem) * mu)<<1;
-	x[0] = (L_tmp + 0x8000)>>16;
+    L_tmp = L_deposit_h(x[0]);
+    L_tmp -= ((*mem) * mu)<<1;
+    x[0] = (L_tmp + 0x8000)>>16;
 
-	*mem = temp;
+    *mem = temp;
 
-	return;
+    return;
 }
 
 
 void Preemph2(
-		Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
-		Word16 mu,                            /* (i) Q15 : preemphasis coefficient                */
-		Word16 lg,                            /* (i)     : lenght of filtering                    */
-		Word16 * mem                          /* (i/o)   : memory (x[-1])                         */
-	     )
+        Word16 x[],                           /* (i/o)   : input signal overwritten by the output */
+        Word16 mu,                            /* (i) Q15 : preemphasis coefficient                */
+        Word16 lg,                            /* (i)     : length of filtering                    */
+        Word16 * mem                          /* (i/o)   : memory (x[-1])                         */
+         )
 {
-	Word16 temp;
-	Word32 i, L_tmp;
+    Word16 temp;
+    Word32 i, L_tmp;
 
-	temp = x[lg - 1];
+    temp = x[lg - 1];
 
-	for (i = (Word16) (lg - 1); i > 0; i--)
-	{
-		L_tmp = L_deposit_h(x[i]);
-		L_tmp -= (x[i - 1] * mu)<<1;
-		L_tmp = (L_tmp << 1);
-		x[i] = (L_tmp + 0x8000)>>16;
-	}
+    for (i = (Word16) (lg - 1); i > 0; i--)
+    {
+        L_tmp = L_deposit_h(x[i]);
+        L_tmp -= (x[i - 1] * mu)<<1; // only called with mu == 22282, so this won't overflow
+        if (L_tmp > INT32_MAX / 2) {
+            L_tmp = INT32_MAX / 2;
+        }
+        L_tmp = (L_tmp << 1);
+        if (L_tmp > INT32_MAX - 0x8000) {
+            L_tmp = INT32_MAX - 0x8000;
+        }
+        x[i] = (L_tmp + 0x8000)>>16;
+    }
 
-	L_tmp = L_deposit_h(x[0]);
-	L_tmp -= ((*mem) * mu)<<1;
-	L_tmp = (L_tmp << 1);
-	x[0] = (L_tmp + 0x8000)>>16;
+    L_tmp = L_deposit_h(x[0]);
+    L_tmp -= ((*mem) * mu)<<1;
+    if (L_tmp > INT32_MAX / 2) {
+        L_tmp = INT32_MAX / 2;
+    }
+    L_tmp = (L_tmp << 1);
+    if (L_tmp > INT32_MAX - 0x8000) {
+        L_tmp = INT32_MAX - 0x8000;
+    }
+    x[0] = (L_tmp + 0x8000)>>16;
 
-	*mem = temp;
+    *mem = temp;
 
-	return;
+    return;
 }
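
Preemph2 doubles the filtered sample (the "signal is multiplied by 2" of the
file header), and the new clamps bound L_tmp both before the extra left shift
and before the 0x8000 rounding add, since either step could otherwise
overflow the 32-bit accumulator. The pattern in isolation, as a sketch:

    #include <stdint.h>

    /* Clamp-before-shift: cap the value so that doubling it, then adding the
     * 0x8000 rounding constant, both stay inside the int32 range. */
    static int16_t dbl_round_sat(int32_t v)
    {
        if (v > INT32_MAX / 2) v = INT32_MAX / 2;           /* v << 1 is safe  */
        v <<= 1;
        if (v > INT32_MAX - 0x8000) v = INT32_MAX - 0x8000; /* +0x8000 is safe */
        return (int16_t)((v + 0x8000) >> 16);               /* round to hi 16  */
    }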
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/q_gain2.c b/media/libstagefright/codecs/amrwbenc/src/q_gain2.c
index e8ca043..bb797d8 100644
--- a/media/libstagefright/codecs/amrwbenc/src/q_gain2.c
+++ b/media/libstagefright/codecs/amrwbenc/src/q_gain2.c
@@ -45,300 +45,300 @@
 
 
 void Init_Q_gain2(
-		Word16 * mem                          /* output  :static memory (2 words)      */
-		)
+        Word16 * mem                          /* output  :static memory (2 words)      */
+        )
 {
-	Word32 i;
+    Word32 i;
 
-	/* 4nd order quantizer energy predictor (init to -14.0 in Q10) */
-	for (i = 0; i < PRED_ORDER; i++)
-	{
-		mem[i] = -14336;                     /* past_qua_en[i] */
-	}
+    /* 4th order quantizer energy predictor (init to -14.0 in Q10) */
+    for (i = 0; i < PRED_ORDER; i++)
+    {
+        mem[i] = -14336;                     /* past_qua_en[i] */
+    }
 
-	return;
+    return;
 }
 
 Word16 Q_gain2(                            /* Return index of quantization.          */
-		Word16 xn[],                          /* (i) Q_xn: Target vector.               */
-		Word16 y1[],                          /* (i) Q_xn: Adaptive codebook.           */
-		Word16 Q_xn,                          /* (i)     : xn and y1 format             */
-		Word16 y2[],                          /* (i) Q9  : Filtered innovative vector.  */
-		Word16 code[],                        /* (i) Q9  : Innovative vector.           */
-		Word16 g_coeff[],                     /* (i)     : Correlations <xn y1> <y1 y1> */
-		/*           Compute in G_pitch().        */
-		Word16 L_subfr,                       /* (i)     : Subframe lenght.             */
-		Word16 nbits,                         /* (i)     : number of bits (6 or 7)      */
-		Word16 * gain_pit,                    /* (i/o)Q14: Pitch gain.                  */
-		Word32 * gain_cod,                    /* (o) Q16 : Code gain.                   */
-		Word16 gp_clip,                       /* (i)     : Gp Clipping flag             */
-		Word16 * mem                          /* (i/o)   : static memory (2 words)      */
-	      )
+        Word16 xn[],                          /* (i) Q_xn: Target vector.               */
+        Word16 y1[],                          /* (i) Q_xn: Adaptive codebook.           */
+        Word16 Q_xn,                          /* (i)     : xn and y1 format             */
+        Word16 y2[],                          /* (i) Q9  : Filtered innovative vector.  */
+        Word16 code[],                        /* (i) Q9  : Innovative vector.           */
+        Word16 g_coeff[],                     /* (i)     : Correlations <xn y1> <y1 y1> */
+        /*           Computed in G_pitch().       */
+        Word16 L_subfr,                       /* (i)     : Subframe length.             */
+        Word16 nbits,                         /* (i)     : number of bits (6 or 7)      */
+        Word16 * gain_pit,                    /* (i/o)Q14: Pitch gain.                  */
+        Word32 * gain_cod,                    /* (o) Q16 : Code gain.                   */
+        Word16 gp_clip,                       /* (i)     : Gp Clipping flag             */
+        Word16 * mem                          /* (i/o)   : static memory (2 words)      */
+          )
 {
-	Word16 index, *p, min_ind, size;
-	Word16 exp, frac, gcode0, exp_gcode0, e_max, exp_code, qua_ener;
-	Word16 g_pitch, g2_pitch, g_code, g_pit_cod, g2_code, g2_code_lo;
-	Word16 coeff[5], coeff_lo[5], exp_coeff[5];
-	Word16 exp_max[5];
-	Word32 i, j, L_tmp, dist_min;
-	Word16 *past_qua_en, *t_qua_gain;
+    Word16 index, *p, min_ind, size;
+    Word16 exp, frac, gcode0, exp_gcode0, e_max, exp_code, qua_ener;
+    Word16 g_pitch, g2_pitch, g_code, g_pit_cod, g2_code, g2_code_lo;
+    Word16 coeff[5], coeff_lo[5], exp_coeff[5];
+    Word16 exp_max[5];
+    Word32 i, j, L_tmp, dist_min;
+    Word16 *past_qua_en, *t_qua_gain;
 
-	past_qua_en = mem;
+    past_qua_en = mem;
 
-	/*-----------------------------------------------------------------*
-	 * - Find the initial quantization pitch index                     *
-	 * - Set gains search range                                        *
-	 *-----------------------------------------------------------------*/
-	if (nbits == 6)
-	{
-		t_qua_gain = t_qua_gain6b;
-		min_ind = 0;
-		size = RANGE;
+    /*-----------------------------------------------------------------*
+     * - Find the initial quantization pitch index                     *
+     * - Set gains search range                                        *
+     *-----------------------------------------------------------------*/
+    if (nbits == 6)
+    {
+        t_qua_gain = t_qua_gain6b;
+        min_ind = 0;
+        size = RANGE;
 
-		if(gp_clip == 1)
-		{
-			size = size - 16;          /* limit gain pitch to 1.0 */
-		}
-	} else
-	{
-		t_qua_gain = t_qua_gain7b;
+        if(gp_clip == 1)
+        {
+            size = size - 16;          /* limit gain pitch to 1.0 */
+        }
+    } else
+    {
+        t_qua_gain = t_qua_gain7b;
 
-		p = t_qua_gain7b + RANGE;            /* pt at 1/4th of table */
+        p = t_qua_gain7b + RANGE;            /* pt at 1/4th of table */
 
-		j = nb_qua_gain7b - RANGE;
+        j = nb_qua_gain7b - RANGE;
 
-		if (gp_clip == 1)
-		{
-			j = j - 27;                /* limit gain pitch to 1.0 */
-		}
-		min_ind = 0;
-		g_pitch = *gain_pit;
+        if (gp_clip == 1)
+        {
+            j = j - 27;                /* limit gain pitch to 1.0 */
+        }
+        min_ind = 0;
+        g_pitch = *gain_pit;
 
-		for (i = 0; i < j; i++, p += 2)
-		{
-			if (g_pitch > *p)
-			{
-				min_ind = min_ind + 1;
-			}
-		}
-		size = RANGE;
-	}
+        for (i = 0; i < j; i++, p += 2)
+        {
+            if (g_pitch > *p)
+            {
+                min_ind = min_ind + 1;
+            }
+        }
+        size = RANGE;
+    }
 
-	/*------------------------------------------------------------------*
-	 *  Compute coefficient need for the quantization.                  *
-	 *                                                                  *
-	 *  coeff[0] =    y1 y1                                             *
-	 *  coeff[1] = -2 xn y1                                             *
-	 *  coeff[2] =    y2 y2                                             *
-	 *  coeff[3] = -2 xn y2                                             *
-	 *  coeff[4] =  2 y1 y2                                             *
-	 *                                                                  *
-	 * Product <y1 y1> and <xn y1> have been compute in G_pitch() and   *
-	 * are in vector g_coeff[].                                         *
-	 *------------------------------------------------------------------*/
+    /*------------------------------------------------------------------*
+     *  Compute the coefficients needed for the quantization.           *
+     *                                                                  *
+     *  coeff[0] =    y1 y1                                             *
+     *  coeff[1] = -2 xn y1                                             *
+     *  coeff[2] =    y2 y2                                             *
+     *  coeff[3] = -2 xn y2                                             *
+     *  coeff[4] =  2 y1 y2                                             *
+     *                                                                  *
+     * Products <y1 y1> and <xn y1> have been computed in G_pitch() and *
+     * are in vector g_coeff[].                                         *
+     *------------------------------------------------------------------*/
 
-	coeff[0] = g_coeff[0];
-	exp_coeff[0] = g_coeff[1];
-	coeff[1] = negate(g_coeff[2]);                    /* coeff[1] = -2 xn y1 */
-	exp_coeff[1] = g_coeff[3] + 1;
+    coeff[0] = g_coeff[0];
+    exp_coeff[0] = g_coeff[1];
+    coeff[1] = negate(g_coeff[2]);                    /* coeff[1] = -2 xn y1 */
+    exp_coeff[1] = g_coeff[3] + 1;
 
-	/* Compute scalar product <y2[],y2[]> */
+    /* Compute scalar product <y2[],y2[]> */
 #ifdef ASM_OPT                   /* asm optimization branch */
-	coeff[2] = extract_h(Dot_product12_asm(y2, y2, L_subfr, &exp));
+    coeff[2] = extract_h(Dot_product12_asm(y2, y2, L_subfr, &exp));
 #else
-	coeff[2] = extract_h(Dot_product12(y2, y2, L_subfr, &exp));
+    coeff[2] = extract_h(Dot_product12(y2, y2, L_subfr, &exp));
 #endif
-	exp_coeff[2] = (exp - 18) + (Q_xn << 1);     /* -18 (y2 Q9) */
+    exp_coeff[2] = (exp - 18) + (Q_xn << 1);     /* -18 (y2 Q9) */
 
-	/* Compute scalar product -2*<xn[],y2[]> */
+    /* Compute scalar product -2*<xn[],y2[]> */
 #ifdef ASM_OPT                  /* asm optimization branch */
-	coeff[3] = extract_h(L_negate(Dot_product12_asm(xn, y2, L_subfr, &exp)));
+    coeff[3] = extract_h(L_negate(Dot_product12_asm(xn, y2, L_subfr, &exp)));
 #else
-	coeff[3] = extract_h(L_negate(Dot_product12(xn, y2, L_subfr, &exp)));
+    coeff[3] = extract_h(L_negate(Dot_product12(xn, y2, L_subfr, &exp)));
 #endif
 
-	exp_coeff[3] = (exp - 8) + Q_xn;  /* -9 (y2 Q9), +1 (2 xn y2) */
+    exp_coeff[3] = (exp - 8) + Q_xn;  /* -9 (y2 Q9), +1 (2 xn y2) */
 
-	/* Compute scalar product 2*<y1[],y2[]> */
+    /* Compute scalar product 2*<y1[],y2[]> */
 #ifdef ASM_OPT                 /* asm optimization branch */
-	coeff[4] = extract_h(Dot_product12_asm(y1, y2, L_subfr, &exp));
+    coeff[4] = extract_h(Dot_product12_asm(y1, y2, L_subfr, &exp));
 #else
-	coeff[4] = extract_h(Dot_product12(y1, y2, L_subfr, &exp));
+    coeff[4] = extract_h(Dot_product12(y1, y2, L_subfr, &exp));
 #endif
-	exp_coeff[4] = (exp - 8) + Q_xn;  /* -9 (y2 Q9), +1 (2 y1 y2) */
+    exp_coeff[4] = (exp - 8) + Q_xn;  /* -9 (y2 Q9), +1 (2 y1 y2) */
 
-	/*-----------------------------------------------------------------*
-	 *  Find energy of code and compute:                               *
-	 *                                                                 *
-	 *    L_tmp = MEAN_ENER - 10log10(energy of code/ L_subfr)         *
-	 *          = MEAN_ENER - 3.0103*log2(energy of code/ L_subfr)     *
-	 *-----------------------------------------------------------------*/
+    /*-----------------------------------------------------------------*
+     *  Find energy of code and compute:                               *
+     *                                                                 *
+     *    L_tmp = MEAN_ENER - 10log10(energy of code/ L_subfr)         *
+     *          = MEAN_ENER - 3.0103*log2(energy of code/ L_subfr)     *
+     *-----------------------------------------------------------------*/
 #ifdef ASM_OPT                 /* asm optimization branch */
-	L_tmp = Dot_product12_asm(code, code, L_subfr, &exp_code);
+    L_tmp = Dot_product12_asm(code, code, L_subfr, &exp_code);
 #else
-	L_tmp = Dot_product12(code, code, L_subfr, &exp_code);
+    L_tmp = Dot_product12(code, code, L_subfr, &exp_code);
 #endif
-	/* exp_code: -18 (code in Q9), -6 (/L_subfr), -31 (L_tmp Q31->Q0) */
-	exp_code = (exp_code - (18 + 6 + 31));
+    /* exp_code: -18 (code in Q9), -6 (/L_subfr), -31 (L_tmp Q31->Q0) */
+    exp_code = (exp_code - (18 + 6 + 31));
 
-	Log2(L_tmp, &exp, &frac);
-	exp += exp_code;
-	L_tmp = Mpy_32_16(exp, frac, -24660);  /* x -3.0103(Q13) -> Q14 */
+    Log2(L_tmp, &exp, &frac);
+    exp += exp_code;
+    L_tmp = Mpy_32_16(exp, frac, -24660);  /* x -3.0103(Q13) -> Q14 */
 
-	L_tmp += (MEAN_ENER * 8192)<<1; /* + MEAN_ENER in Q14 */
+    L_tmp += (MEAN_ENER * 8192)<<1; /* + MEAN_ENER in Q14 */
 
-	/*-----------------------------------------------------------------*
-	 * Compute gcode0.                                                 *
-	 *  = Sum(i=0,1) pred[i]*past_qua_en[i] + mean_ener - ener_code    *
-	 *-----------------------------------------------------------------*/
-	L_tmp = (L_tmp << 10);              /* From Q14 to Q24 */
-	L_tmp += (pred[0] * past_qua_en[0])<<1;      /* Q13*Q10 -> Q24 */
-	L_tmp += (pred[1] * past_qua_en[1])<<1;      /* Q13*Q10 -> Q24 */
-	L_tmp += (pred[2] * past_qua_en[2])<<1;      /* Q13*Q10 -> Q24 */
-	L_tmp += (pred[3] * past_qua_en[3])<<1;      /* Q13*Q10 -> Q24 */
+    /*-----------------------------------------------------------------*
+     * Compute gcode0.                                                 *
+     *  = Sum(i=0,3) pred[i]*past_qua_en[i] + mean_ener - ener_code    *
+     *-----------------------------------------------------------------*/
+    L_tmp = (L_tmp << 10);              /* From Q14 to Q24 */
+    L_tmp += (pred[0] * past_qua_en[0])<<1;      /* Q13*Q10 -> Q24 */
+    L_tmp += (pred[1] * past_qua_en[1])<<1;      /* Q13*Q10 -> Q24 */
+    L_tmp += (pred[2] * past_qua_en[2])<<1;      /* Q13*Q10 -> Q24 */
+    L_tmp += (pred[3] * past_qua_en[3])<<1;      /* Q13*Q10 -> Q24 */
 
-	gcode0 = extract_h(L_tmp);             /* From Q24 to Q8  */
+    gcode0 = extract_h(L_tmp);             /* From Q24 to Q8  */
 
-	/*-----------------------------------------------------------------*
-	 * gcode0 = pow(10.0, gcode0/20)                                   *
-	 *        = pow(2, 3.321928*gcode0/20)                             *
-	 *        = pow(2, 0.166096*gcode0)                                *
-	 *-----------------------------------------------------------------*/
+    /*-----------------------------------------------------------------*
+     * gcode0 = pow(10.0, gcode0/20)                                   *
+     *        = pow(2, 3.321928*gcode0/20)                             *
+     *        = pow(2, 0.166096*gcode0)                                *
+     *-----------------------------------------------------------------*/
 
-	L_tmp = vo_L_mult(gcode0, 5443);          /* *0.166096 in Q15 -> Q24     */
-	L_tmp = L_tmp >> 8;               /* From Q24 to Q16             */
-	VO_L_Extract(L_tmp, &exp_gcode0, &frac);  /* Extract exponent of gcode0  */
+    L_tmp = vo_L_mult(gcode0, 5443);          /* *0.166096 in Q15 -> Q24     */
+    L_tmp = L_tmp >> 8;               /* From Q24 to Q16             */
+    VO_L_Extract(L_tmp, &exp_gcode0, &frac);  /* Extract exponent of gcode0  */
 
-	gcode0 = (Word16)(Pow2(14, frac));    /* Put 14 as exponent so that  */
-	/* output of Pow2() will be:   */
-	/* 16384 < Pow2() <= 32767     */
-	exp_gcode0 -= 14;
+    gcode0 = (Word16)(Pow2(14, frac));    /* Put 14 as exponent so that  */
+    /* output of Pow2() will be:   */
+    /* 16384 < Pow2() <= 32767     */
+    exp_gcode0 -= 14;
 
-	/*-------------------------------------------------------------------------*
-	 * Find the best quantizer                                                 *
-	 * ~~~~~~~~~~~~~~~~~~~~~~~                                                 *
-	 * Before doing the computation we need to aling exponents of coeff[]      *
-	 * to be sure to have the maximum precision.                               *
-	 *                                                                         *
-	 * In the table the pitch gains are in Q14, the code gains are in Q11 and  *
-	 * are multiply by gcode0 which have been multiply by 2^exp_gcode0.        *
-	 * Also when we compute g_pitch*g_pitch, g_code*g_code and g_pitch*g_code  *
-	 * we divide by 2^15.                                                      *
-	 * Considering all the scaling above we have:                              *
-	 *                                                                         *
-	 *   exp_code = exp_gcode0-11+15 = exp_gcode0+4                            *
-	 *                                                                         *
-	 *   g_pitch*g_pitch  = -14-14+15                                          *
-	 *   g_pitch          = -14                                                *
-	 *   g_code*g_code    = (2*exp_code)+15                                    *
-	 *   g_code           = exp_code                                           *
-	 *   g_pitch*g_code   = -14 + exp_code +15                                 *
-	 *                                                                         *
-	 *   g_pitch*g_pitch * coeff[0]  ;exp_max0 = exp_coeff[0] - 13             *
-	 *   g_pitch         * coeff[1]  ;exp_max1 = exp_coeff[1] - 14             *
-	 *   g_code*g_code   * coeff[2]  ;exp_max2 = exp_coeff[2] +15+(2*exp_code) *
-	 *   g_code          * coeff[3]  ;exp_max3 = exp_coeff[3] + exp_code       *
-	 *   g_pitch*g_code  * coeff[4]  ;exp_max4 = exp_coeff[4] + 1 + exp_code   *
-	 *-------------------------------------------------------------------------*/
+    /*-------------------------------------------------------------------------*
+     * Find the best quantizer                                                 *
+     * ~~~~~~~~~~~~~~~~~~~~~~~                                                 *
+     * Before doing the computation we need to align exponents of coeff[]      *
+     * to be sure to have the maximum precision.                               *
+     *                                                                         *
+     * In the table the pitch gains are in Q14, the code gains are in Q11 and  *
+     * are multiplied by gcode0, which has been multiplied by 2^exp_gcode0.    *
+     * Also when we compute g_pitch*g_pitch, g_code*g_code and g_pitch*g_code  *
+     * we divide by 2^15.                                                      *
+     * Considering all the scaling above we have:                              *
+     *                                                                         *
+     *   exp_code = exp_gcode0-11+15 = exp_gcode0+4                            *
+     *                                                                         *
+     *   g_pitch*g_pitch  = -14-14+15                                          *
+     *   g_pitch          = -14                                                *
+     *   g_code*g_code    = (2*exp_code)+15                                    *
+     *   g_code           = exp_code                                           *
+     *   g_pitch*g_code   = -14 + exp_code +15                                 *
+     *                                                                         *
+     *   g_pitch*g_pitch * coeff[0]  ;exp_max0 = exp_coeff[0] - 13             *
+     *   g_pitch         * coeff[1]  ;exp_max1 = exp_coeff[1] - 14             *
+     *   g_code*g_code   * coeff[2]  ;exp_max2 = exp_coeff[2] +15+(2*exp_code) *
+     *   g_code          * coeff[3]  ;exp_max3 = exp_coeff[3] + exp_code       *
+     *   g_pitch*g_code  * coeff[4]  ;exp_max4 = exp_coeff[4] + 1 + exp_code   *
+     *-------------------------------------------------------------------------*/
 
-	exp_code = (exp_gcode0 + 4);
-	exp_max[0] = (exp_coeff[0] - 13);
-	exp_max[1] = (exp_coeff[1] - 14);
-	exp_max[2] = (exp_coeff[2] + (15 + (exp_code << 1)));
-	exp_max[3] = (exp_coeff[3] + exp_code);
-	exp_max[4] = (exp_coeff[4] + (1 + exp_code));
+    exp_code = (exp_gcode0 + 4);
+    exp_max[0] = (exp_coeff[0] - 13);
+    exp_max[1] = (exp_coeff[1] - 14);
+    exp_max[2] = (exp_coeff[2] + (15 + (exp_code << 1)));
+    exp_max[3] = (exp_coeff[3] + exp_code);
+    exp_max[4] = (exp_coeff[4] + (1 + exp_code));
 
-	/* Find maximum exponant */
+    /* Find maximum exponent */
 
-	e_max = exp_max[0];
-	for (i = 1; i < 5; i++)
-	{
-		if(exp_max[i] > e_max)
-		{
-			e_max = exp_max[i];
-		}
-	}
+    e_max = exp_max[0];
+    for (i = 1; i < 5; i++)
+    {
+        if(exp_max[i] > e_max)
+        {
+            e_max = exp_max[i];
+        }
+    }
 
-	/* align coeff[] and save in special 32 bit double precision */
+    /* align coeff[] and save in special 32 bit double precision */
 
-	for (i = 0; i < 5; i++)
-	{
-		j = add1(vo_sub(e_max, exp_max[i]), 2);/* /4 to avoid overflow */
-		L_tmp = L_deposit_h(coeff[i]);
-		L_tmp = L_shr(L_tmp, j);
-		VO_L_Extract(L_tmp, &coeff[i], &coeff_lo[i]);
-		coeff_lo[i] = (coeff_lo[i] >> 3);   /* lo >> 3 */
-	}
+    for (i = 0; i < 5; i++)
+    {
+        j = add1(vo_sub(e_max, exp_max[i]), 2);/* /4 to avoid overflow */
+        L_tmp = L_deposit_h(coeff[i]);
+        L_tmp = L_shr(L_tmp, j);
+        VO_L_Extract(L_tmp, &coeff[i], &coeff_lo[i]);
+        coeff_lo[i] = (coeff_lo[i] >> 3);   /* lo >> 3 */
+    }
 
-	/* Codebook search */
-	dist_min = MAX_32;
-	p = &t_qua_gain[min_ind << 1];
+    /* Codebook search */
+    dist_min = MAX_32;
+    p = &t_qua_gain[min_ind << 1];
 
-	index = 0;
-	for (i = 0; i < size; i++)
-	{
-		g_pitch = *p++;
-		g_code = *p++;
+    index = 0;
+    for (i = 0; i < size; i++)
+    {
+        g_pitch = *p++;
+        g_code = *p++;
 
-		g_code = ((g_code * gcode0) + 0x4000)>>15;
-		g2_pitch = ((g_pitch * g_pitch) + 0x4000)>>15;
-		g_pit_cod = ((g_code * g_pitch) + 0x4000)>>15;
-		L_tmp = (g_code * g_code)<<1;
-		VO_L_Extract(L_tmp, &g2_code, &g2_code_lo);
+        g_code = ((g_code * gcode0) + 0x4000)>>15;
+        g2_pitch = ((g_pitch * g_pitch) + 0x4000)>>15;
+        g_pit_cod = ((g_code * g_pitch) + 0x4000)>>15;
+        L_tmp = (g_code * g_code)<<1;
+        VO_L_Extract(L_tmp, &g2_code, &g2_code_lo);
 
-		L_tmp = (coeff[2] * g2_code_lo)<<1;
-		L_tmp =  (L_tmp >> 3);
-		L_tmp += (coeff_lo[0] * g2_pitch)<<1;
-		L_tmp += (coeff_lo[1] * g_pitch)<<1;
-		L_tmp += (coeff_lo[2] * g2_code)<<1;
-		L_tmp += (coeff_lo[3] * g_code)<<1;
-		L_tmp += (coeff_lo[4] * g_pit_cod)<<1;
-		L_tmp =  (L_tmp >> 12);
-		L_tmp += (coeff[0] * g2_pitch)<<1;
-		L_tmp += (coeff[1] * g_pitch)<<1;
-		L_tmp += (coeff[2] * g2_code)<<1;
-		L_tmp += (coeff[3] * g_code)<<1;
-		L_tmp += (coeff[4] * g_pit_cod)<<1;
+        L_tmp = (coeff[2] * g2_code_lo)<<1;
+        L_tmp =  (L_tmp >> 3);
+        L_tmp += (coeff_lo[0] * g2_pitch)<<1;
+        L_tmp += (coeff_lo[1] * g_pitch)<<1;
+        L_tmp += (coeff_lo[2] * g2_code)<<1;
+        L_tmp += (coeff_lo[3] * g_code)<<1;
+        L_tmp += (coeff_lo[4] * g_pit_cod)<<1;
+        L_tmp =  (L_tmp >> 12);
+        L_tmp += (coeff[0] * g2_pitch)<<1;
+        L_tmp += (coeff[1] * g_pitch)<<1;
+        L_tmp += (coeff[2] * g2_code)<<1;
+        L_tmp += (coeff[3] * g_code)<<1;
+        L_tmp += (coeff[4] * g_pit_cod)<<1;
 
-		if(L_tmp < dist_min)
-		{
-			dist_min = L_tmp;
-			index = i;
-		}
-	}
+        if(L_tmp < dist_min)
+        {
+            dist_min = L_tmp;
+            index = i;
+        }
+    }
 
-	/* Read the quantized gains */
-	index = index + min_ind;
-	p = &t_qua_gain[(index + index)];
-	*gain_pit = *p++;                       /* selected pitch gain in Q14 */
-	g_code = *p++;                          /* selected code gain in Q11  */
+    /* Read the quantized gains */
+    index = index + min_ind;
+    p = &t_qua_gain[(index + index)];
+    *gain_pit = *p++;                       /* selected pitch gain in Q14 */
+    g_code = *p++;                          /* selected code gain in Q11  */
 
-	L_tmp = vo_L_mult(g_code, gcode0);             /* Q11*Q0 -> Q12 */
-	L_tmp = L_shl(L_tmp, (exp_gcode0 + 4));   /* Q12 -> Q16 */
+    L_tmp = vo_L_mult(g_code, gcode0);             /* Q11*Q0 -> Q12 */
+    L_tmp = L_shl(L_tmp, (exp_gcode0 + 4));   /* Q12 -> Q16 */
 
-	*gain_cod = L_tmp;                       /* gain of code in Q16 */
+    *gain_cod = L_tmp;                       /* gain of code in Q16 */
 
-	/*---------------------------------------------------*
-	 * qua_ener = 20*log10(g_code)                       *
-	 *          = 6.0206*log2(g_code)                    *
-	 *          = 6.0206*(log2(g_codeQ11) - 11)          *
-	 *---------------------------------------------------*/
+    /*---------------------------------------------------*
+     * qua_ener = 20*log10(g_code)                       *
+     *          = 6.0206*log2(g_code)                    *
+     *          = 6.0206*(log2(g_codeQ11) - 11)          *
+     *---------------------------------------------------*/
 
-	L_tmp = L_deposit_l(g_code);
-	Log2(L_tmp, &exp, &frac);
-	exp -= 11;
-	L_tmp = Mpy_32_16(exp, frac, 24660);   /* x 6.0206 in Q12 */
+    L_tmp = L_deposit_l(g_code);
+    Log2(L_tmp, &exp, &frac);
+    exp -= 11;
+    L_tmp = Mpy_32_16(exp, frac, 24660);   /* x 6.0206 in Q12 */
 
-	qua_ener = (Word16)(L_tmp >> 3); /* result in Q10 */
+    qua_ener = (Word16)(L_tmp >> 3); /* result in Q10 */
 
-	/* update table of past quantized energies */
+    /* update table of past quantized energies */
 
-	past_qua_en[3] = past_qua_en[2];
-	past_qua_en[2] = past_qua_en[1];
-	past_qua_en[1] = past_qua_en[0];
-	past_qua_en[0] = qua_ener;
+    past_qua_en[3] = past_qua_en[2];
+    past_qua_en[2] = past_qua_en[1];
+    past_qua_en[1] = past_qua_en[0];
+    past_qua_en[0] = qua_ener;
 
-	return (index);
+    return (index);
 }
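
Q_gain2 predicts the code-gain energy with a fourth-order moving-average
predictor over the last four quantized energies (pred[i] in Q13 times
past_qua_en[i] in Q10), then shifts the newly quantized energy into that
history at the end. The bookkeeping in floating point, as a sketch with
placeholder taps (the real values live in the codec's pred[] table):

    /* MA energy prediction and the shift-register update from Q_gain2. */
    static double predict_energy(const double past[4], const double taps[4])
    {
        double e = 0.0;
        for (int i = 0; i < 4; i++)
            e += taps[i] * past[i];     /* pred[i] * past_qua_en[i] */
        return e;
    }

    static void update_energy_history(double past[4], double qua_ener)
    {
        past[3] = past[2];
        past[2] = past[1];
        past[1] = past[0];
        past[0] = qua_ener;             /* newest quantized energy first */
    }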
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/q_pulse.c b/media/libstagefright/codecs/amrwbenc/src/q_pulse.c
index d658602..fe0bdda 100644
--- a/media/libstagefright/codecs/amrwbenc/src/q_pulse.c
+++ b/media/libstagefright/codecs/amrwbenc/src/q_pulse.c
@@ -29,372 +29,372 @@
 #define NB_POS 16                          /* pos in track, mask for sign bit */
 
 Word32 quant_1p_N1(                        /* (o) return N+1 bits             */
-		Word16 pos,                        /* (i) position of the pulse       */
-		Word16 N)                          /* (i) number of bits for position */
+        Word16 pos,                        /* (i) position of the pulse       */
+        Word16 N)                          /* (i) number of bits for position */
 {
-	Word16 mask;
-	Word32 index;
+    Word16 mask;
+    Word32 index;
 
-	mask = (1 << N) - 1;              /* mask = ((1<<N)-1); */
-	/*-------------------------------------------------------*
-	 * Quantization of 1 pulse with N+1 bits:                *
-	 *-------------------------------------------------------*/
-	index = L_deposit_l((Word16) (pos & mask));
-	if ((pos & NB_POS) != 0)
-	{
-		index = vo_L_add(index, L_deposit_l(1 << N));   /* index += 1 << N; */
-	}
-	return (index);
+    mask = (1 << N) - 1;              /* mask = ((1<<N)-1); */
+    /*-------------------------------------------------------*
+     * Quantization of 1 pulse with N+1 bits:                *
+     *-------------------------------------------------------*/
+    index = L_deposit_l((Word16) (pos & mask));
+    if ((pos & NB_POS) != 0)
+    {
+        index = vo_L_add(index, L_deposit_l(1 << N));   /* index += 1 << N; */
+    }
+    return (index);
 }
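
quant_1p_N1 spends N bits on the position within the track and one extra bit
(bit N) on the sign, which arrives encoded in bit 4 of pos (NB_POS == 16).
For example, with N = 4 and pos = 19: 19 & 15 = 3 and 19 & 16 != 0, so
index = 3 + (1 << 4) = 19. A plain-C restatement of the packing:

    /* Pack one pulse into N+1 bits: N position bits plus one sign bit. */
    static int quant_1p_ref(int pos, int N)
    {
        int index = pos & ((1 << N) - 1);   /* position within the track */
        if (pos & 16)                       /* NB_POS sign flag          */
            index += 1 << N;                /* sign lands in bit N       */
        return index;
    }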
 
 
 Word32 quant_2p_2N1(                       /* (o) return (2*N)+1 bits         */
-		Word16 pos1,                          /* (i) position of the pulse 1     */
-		Word16 pos2,                          /* (i) position of the pulse 2     */
-		Word16 N)                             /* (i) number of bits for position */
+        Word16 pos1,                          /* (i) position of the pulse 1     */
+        Word16 pos2,                          /* (i) position of the pulse 2     */
+        Word16 N)                             /* (i) number of bits for position */
 {
-	Word16 mask, tmp;
-	Word32 index;
-	mask = (1 << N) - 1;              /* mask = ((1<<N)-1); */
-	/*-------------------------------------------------------*
-	 * Quantization of 2 pulses with 2*N+1 bits:             *
-	 *-------------------------------------------------------*/
-	if (((pos2 ^ pos1) & NB_POS) == 0)
-	{
-		/* sign of 1st pulse == sign of 2th pulse */
-		if(pos1 <= pos2)          /* ((pos1 - pos2) <= 0) */
-		{
-			/* index = ((pos1 & mask) << N) + (pos2 & mask); */
-			index = L_deposit_l(add1((((Word16) (pos1 & mask)) << N), ((Word16) (pos2 & mask))));
-		} else
-		{
-			/* ((pos2 & mask) << N) + (pos1 & mask); */
-			index = L_deposit_l(add1((((Word16) (pos2 & mask)) << N), ((Word16) (pos1 & mask))));
-		}
-		if ((pos1 & NB_POS) != 0)
-		{
-			tmp = (N << 1);
-			index = vo_L_add(index, (1L << tmp));       /* index += 1 << (2*N); */
-		}
-	} else
-	{
-		/* sign of 1st pulse != sign of 2th pulse */
-		if (vo_sub((Word16) (pos1 & mask), (Word16) (pos2 & mask)) <= 0)
-		{
-			/* index = ((pos2 & mask) << N) + (pos1 & mask); */
-			index = L_deposit_l(add1((((Word16) (pos2 & mask)) << N), ((Word16) (pos1 & mask))));
-			if ((pos2 & NB_POS) != 0)
-			{
-				tmp = (N << 1);           /* index += 1 << (2*N); */
-				index = vo_L_add(index, (1L << tmp));
-			}
-		} else
-		{
-			/* index = ((pos1 & mask) << N) + (pos2 & mask);	 */
-			index = L_deposit_l(add1((((Word16) (pos1 & mask)) << N), ((Word16) (pos2 & mask))));
-			if ((pos1 & NB_POS) != 0)
-			{
-				tmp = (N << 1);
-				index = vo_L_add(index, (1 << tmp));    /* index += 1 << (2*N); */
-			}
-		}
-	}
-	return (index);
+    Word16 mask, tmp;
+    Word32 index;
+    mask = (1 << N) - 1;              /* mask = ((1<<N)-1); */
+    /*-------------------------------------------------------*
+     * Quantization of 2 pulses with 2*N+1 bits:             *
+     *-------------------------------------------------------*/
+    if (((pos2 ^ pos1) & NB_POS) == 0)
+    {
+        /* sign of 1st pulse == sign of 2nd pulse */
+        if(pos1 <= pos2)          /* ((pos1 - pos2) <= 0) */
+        {
+            /* index = ((pos1 & mask) << N) + (pos2 & mask); */
+            index = L_deposit_l(add1((((Word16) (pos1 & mask)) << N), ((Word16) (pos2 & mask))));
+        } else
+        {
+            /* ((pos2 & mask) << N) + (pos1 & mask); */
+            index = L_deposit_l(add1((((Word16) (pos2 & mask)) << N), ((Word16) (pos1 & mask))));
+        }
+        if ((pos1 & NB_POS) != 0)
+        {
+            tmp = (N << 1);
+            index = vo_L_add(index, (1L << tmp));       /* index += 1 << (2*N); */
+        }
+    } else
+    {
+        /* sign of 1st pulse != sign of 2nd pulse */
+        if (vo_sub((Word16) (pos1 & mask), (Word16) (pos2 & mask)) <= 0)
+        {
+            /* index = ((pos2 & mask) << N) + (pos1 & mask); */
+            index = L_deposit_l(add1((((Word16) (pos2 & mask)) << N), ((Word16) (pos1 & mask))));
+            if ((pos2 & NB_POS) != 0)
+            {
+                tmp = (N << 1);           /* index += 1 << (2*N); */
+                index = vo_L_add(index, (1L << tmp));
+            }
+        } else
+        {
+            /* index = ((pos1 & mask) << N) + (pos2 & mask);     */
+            index = L_deposit_l(add1((((Word16) (pos1 & mask)) << N), ((Word16) (pos2 & mask))));
+            if ((pos1 & NB_POS) != 0)
+            {
+                tmp = (N << 1);
+                index = vo_L_add(index, (1 << tmp));    /* index += 1 << (2*N); */
+            }
+        }
+    }
+    return (index);
 }
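
quant_2p_2N1 orders the two masked positions so that their relative order,
plus a single 2^(2N) bit, is enough to recover both signs from 2N+1 bits. A
worked trace with N = 4: pos1 = 5 and pos2 = 20 have opposite signs
((5 ^ 20) & 16 != 0), and since (5 & 15) = 5 > (20 & 15) = 4 the index packs
pos1 high with no 2^(2N) term (5 & 16 == 0):

    #include <stdio.h>

    int main(void)
    {
        int pos1 = 5, pos2 = 20, N = 4, mask = (1 << N) - 1;
        int index = ((pos1 & mask) << N) + (pos2 & mask);
        if (pos1 & 16)                  /* NB_POS bit: not set for pos1 = 5 */
            index += 1 << (2 * N);
        printf("index = %d\n", index);  /* prints: index = 84 */
        return 0;
    }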
 
 
 Word32 quant_3p_3N1(                       /* (o) return (3*N)+1 bits         */
-		Word16 pos1,                          /* (i) position of the pulse 1     */
-		Word16 pos2,                          /* (i) position of the pulse 2     */
-		Word16 pos3,                          /* (i) position of the pulse 3     */
-		Word16 N)                             /* (i) number of bits for position */
+        Word16 pos1,                          /* (i) position of the pulse 1     */
+        Word16 pos2,                          /* (i) position of the pulse 2     */
+        Word16 pos3,                          /* (i) position of the pulse 3     */
+        Word16 N)                             /* (i) number of bits for position */
 {
-	Word16 nb_pos;
-	Word32 index;
+    Word16 nb_pos;
+    Word32 index;
 
-	nb_pos =(1 <<(N - 1));            /* nb_pos = (1<<(N-1)); */
-	/*-------------------------------------------------------*
-	 * Quantization of 3 pulses with 3*N+1 bits:             *
-	 *-------------------------------------------------------*/
-	if (((pos1 ^ pos2) & nb_pos) == 0)
-	{
-		index = quant_2p_2N1(pos1, pos2, sub(N, 1));    /* index = quant_2p_2N1(pos1, pos2, (N-1)); */
-		/* index += (pos1 & nb_pos) << N; */
-		index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
-		/* index += quant_1p_N1(pos3, N) << (2*N); */
-		index = vo_L_add(index, (quant_1p_N1(pos3, N)<<(N << 1)));
+    nb_pos =(1 <<(N - 1));            /* nb_pos = (1<<(N-1)); */
+    /*-------------------------------------------------------*
+     * Quantization of 3 pulses with 3*N+1 bits:             *
+     *-------------------------------------------------------*/
+    if (((pos1 ^ pos2) & nb_pos) == 0)
+    {
+        index = quant_2p_2N1(pos1, pos2, sub(N, 1));    /* index = quant_2p_2N1(pos1, pos2, (N-1)); */
+        /* index += (pos1 & nb_pos) << N; */
+        index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
+        /* index += quant_1p_N1(pos3, N) << (2*N); */
+        index = vo_L_add(index, (quant_1p_N1(pos3, N)<<(N << 1)));
 
-	} else if (((pos1 ^ pos3) & nb_pos) == 0)
-	{
-		index = quant_2p_2N1(pos1, pos3, sub(N, 1));    /* index = quant_2p_2N1(pos1, pos3, (N-1)); */
-		index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
-		/* index += (pos1 & nb_pos) << N; */
-		index = vo_L_add(index, (quant_1p_N1(pos2, N) << (N << 1)));
-		/* index += quant_1p_N1(pos2, N) <<
-		 * (2*N); */
-	} else
-	{
-		index = quant_2p_2N1(pos2, pos3, (N - 1));    /* index = quant_2p_2N1(pos2, pos3, (N-1)); */
-		/* index += (pos2 & nb_pos) << N;			 */
-		index = vo_L_add(index, (L_deposit_l((Word16) (pos2 & nb_pos)) << N));
-		/* index += quant_1p_N1(pos1, N) << (2*N);	 */
-		index = vo_L_add(index, (quant_1p_N1(pos1, N) << (N << 1)));
-	}
-	return (index);
+    } else if (((pos1 ^ pos3) & nb_pos) == 0)
+    {
+        index = quant_2p_2N1(pos1, pos3, sub(N, 1));    /* index = quant_2p_2N1(pos1, pos3, (N-1)); */
+        index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
+        /* index += (pos1 & nb_pos) << N; */
+        index = vo_L_add(index, (quant_1p_N1(pos2, N) << (N << 1)));
+        /* index += quant_1p_N1(pos2, N) <<
+         * (2*N); */
+    } else
+    {
+        index = quant_2p_2N1(pos2, pos3, (N - 1));    /* index = quant_2p_2N1(pos2, pos3, (N-1)); */
+        /* index += (pos2 & nb_pos) << N;            */
+        index = vo_L_add(index, (L_deposit_l((Word16) (pos2 & nb_pos)) << N));
+        /* index += quant_1p_N1(pos1, N) << (2*N);   */
+        index = vo_L_add(index, (quant_1p_N1(pos1, N) << (N << 1)));
+    }
+    return (index);
 }
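
The bit budget of the recursion checks out: quant_2p_2N1 at N-1 bits yields
2(N-1)+1 = 2N-1 bits in positions 0..2N-2, the shared half-track flag
((pos & nb_pos) << N, with nb_pos == 1 << (N-1)) lands exactly in bit 2N-1,
and quant_1p_N1's N+1 bits shifted left by 2N occupy bits 2N..3N, for
(2N-1) + 1 + (N+1) = 3N+1 bits in total with no overlap.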
 
 
 Word32 quant_4p_4N1(                       /* (o) return (4*N)+1 bits         */
-		Word16 pos1,                          /* (i) position of the pulse 1     */
-		Word16 pos2,                          /* (i) position of the pulse 2     */
-		Word16 pos3,                          /* (i) position of the pulse 3     */
-		Word16 pos4,                          /* (i) position of the pulse 4     */
-		Word16 N)                             /* (i) number of bits for position */
+        Word16 pos1,                          /* (i) position of the pulse 1     */
+        Word16 pos2,                          /* (i) position of the pulse 2     */
+        Word16 pos3,                          /* (i) position of the pulse 3     */
+        Word16 pos4,                          /* (i) position of the pulse 4     */
+        Word16 N)                             /* (i) number of bits for position */
 {
-	Word16 nb_pos;
-	Word32 index;
+    Word16 nb_pos;
+    Word32 index;
 
-	nb_pos = 1 << (N - 1);            /* nb_pos = (1<<(N-1));  */
-	/*-------------------------------------------------------*
-	 * Quantization of 4 pulses with 4*N+1 bits:             *
-	 *-------------------------------------------------------*/
-	if (((pos1 ^ pos2) & nb_pos) == 0)
-	{
-		index = quant_2p_2N1(pos1, pos2, sub(N, 1));    /* index = quant_2p_2N1(pos1, pos2, (N-1)); */
-		/* index += (pos1 & nb_pos) << N;	 */
-		index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
-		/* index += quant_2p_2N1(pos3, pos4, N) << (2*N); */
-		index = vo_L_add(index, (quant_2p_2N1(pos3, pos4, N) << (N << 1)));
-	} else if (((pos1 ^ pos3) & nb_pos) == 0)
-	{
-		index = quant_2p_2N1(pos1, pos3, (N - 1));
-		/* index += (pos1 & nb_pos) << N; */
-		index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
-		/* index += quant_2p_2N1(pos2, pos4, N) << (2*N); */
-		index = vo_L_add(index, (quant_2p_2N1(pos2, pos4, N) << (N << 1)));
-	} else
-	{
-		index = quant_2p_2N1(pos2, pos3, (N - 1));
-		/* index += (pos2 & nb_pos) << N; */
-		index = vo_L_add(index, (L_deposit_l((Word16) (pos2 & nb_pos)) << N));
-		/* index += quant_2p_2N1(pos1, pos4, N) << (2*N); */
-		index = vo_L_add(index, (quant_2p_2N1(pos1, pos4, N) << (N << 1)));
-	}
-	return (index);
+    nb_pos = 1 << (N - 1);            /* nb_pos = (1<<(N-1));  */
+    /*-------------------------------------------------------*
+     * Quantization of 4 pulses with 4*N+1 bits:             *
+     *-------------------------------------------------------*/
+    if (((pos1 ^ pos2) & nb_pos) == 0)
+    {
+        index = quant_2p_2N1(pos1, pos2, sub(N, 1));    /* index = quant_2p_2N1(pos1, pos2, (N-1)); */
+        /* index += (pos1 & nb_pos) << N;    */
+        index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
+        /* index += quant_2p_2N1(pos3, pos4, N) << (2*N); */
+        index = vo_L_add(index, (quant_2p_2N1(pos3, pos4, N) << (N << 1)));
+    } else if (((pos1 ^ pos3) & nb_pos) == 0)
+    {
+        index = quant_2p_2N1(pos1, pos3, (N - 1));
+        /* index += (pos1 & nb_pos) << N; */
+        index = vo_L_add(index, (L_deposit_l((Word16) (pos1 & nb_pos)) << N));
+        /* index += quant_2p_2N1(pos2, pos4, N) << (2*N); */
+        index = vo_L_add(index, (quant_2p_2N1(pos2, pos4, N) << (N << 1)));
+    } else
+    {
+        index = quant_2p_2N1(pos2, pos3, (N - 1));
+        /* index += (pos2 & nb_pos) << N; */
+        index = vo_L_add(index, (L_deposit_l((Word16) (pos2 & nb_pos)) << N));
+        /* index += quant_2p_2N1(pos1, pos4, N) << (2*N); */
+        index = vo_L_add(index, (quant_2p_2N1(pos1, pos4, N) << (N << 1)));
+    }
+    return (index);
 }
 
 
 Word32 quant_4p_4N(                        /* (o) return 4*N bits             */
-		Word16 pos[],                         /* (i) position of the pulse 1..4  */
-		Word16 N)                             /* (i) number of bits for position */
+        Word16 pos[],                         /* (i) position of the pulse 1..4  */
+        Word16 N)                             /* (i) number of bits for position */
 {
-	Word16 nb_pos, mask __unused, n_1, tmp;
-	Word16 posA[4], posB[4];
-	Word32 i, j, k, index;
+    Word16 nb_pos, mask __unused, n_1, tmp;
+    Word16 posA[4], posB[4];
+    Word32 i, j, k, index;
 
-	n_1 = (Word16) (N - 1);
-	nb_pos = (1 << n_1);                  /* nb_pos = (1<<n_1); */
-	mask = vo_sub((1 << N), 1);              /* mask = ((1<<N)-1); */
+    n_1 = (Word16) (N - 1);
+    nb_pos = (1 << n_1);                  /* nb_pos = (1<<n_1); */
+    mask = vo_sub((1 << N), 1);              /* mask = ((1<<N)-1); */
 
-	i = 0;
-	j = 0;
-	for (k = 0; k < 4; k++)
-	{
-		if ((pos[k] & nb_pos) == 0)
-		{
-			posA[i++] = pos[k];
-		} else
-		{
-			posB[j++] = pos[k];
-		}
-	}
+    i = 0;
+    j = 0;
+    for (k = 0; k < 4; k++)
+    {
+        if ((pos[k] & nb_pos) == 0)
+        {
+            posA[i++] = pos[k];
+        } else
+        {
+            posB[j++] = pos[k];
+        }
+    }
 
-	switch (i)
-	{
-		case 0:
-			tmp = vo_sub((N << 2), 3);           /* index = 1 << ((4*N)-3); */
-			index = (1L << tmp);
-			/* index += quant_4p_4N1(posB[0], posB[1], posB[2], posB[3], n_1); */
-			index = vo_L_add(index, quant_4p_4N1(posB[0], posB[1], posB[2], posB[3], n_1));
-			break;
-		case 1:
-			/* index = quant_1p_N1(posA[0], n_1) << ((3*n_1)+1); */
-			tmp = add1((Word16)((vo_L_mult(3, n_1) >> 1)), 1);
-			index = L_shl(quant_1p_N1(posA[0], n_1), tmp);
-			/* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1); */
-			index = vo_L_add(index, quant_3p_3N1(posB[0], posB[1], posB[2], n_1));
-			break;
-		case 2:
-			tmp = ((n_1 << 1) + 1);         /* index = quant_2p_2N1(posA[0], posA[1], n_1) << ((2*n_1)+1); */
-			index = L_shl(quant_2p_2N1(posA[0], posA[1], n_1), tmp);
-			/* index += quant_2p_2N1(posB[0], posB[1], n_1); */
-			index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], n_1));
-			break;
-		case 3:
-			/* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << N; */
-			index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), N);
-			index = vo_L_add(index, quant_1p_N1(posB[0], n_1));        /* index += quant_1p_N1(posB[0], n_1); */
-			break;
-		case 4:
-			index = quant_4p_4N1(posA[0], posA[1], posA[2], posA[3], n_1);
-			break;
-		default:
-			index = 0;
-			fprintf(stderr, "Error in function quant_4p_4N\n");
-	}
-	tmp = ((N << 2) - 2);               /* index += (i & 3) << ((4*N)-2); */
-	index = vo_L_add(index, L_shl((L_deposit_l(i) & (3L)), tmp));
+    switch (i)
+    {
+        case 0:
+            tmp = vo_sub((N << 2), 3);           /* index = 1 << ((4*N)-3); */
+            index = (1L << tmp);
+            /* index += quant_4p_4N1(posB[0], posB[1], posB[2], posB[3], n_1); */
+            index = vo_L_add(index, quant_4p_4N1(posB[0], posB[1], posB[2], posB[3], n_1));
+            break;
+        case 1:
+            /* index = quant_1p_N1(posA[0], n_1) << ((3*n_1)+1); */
+            tmp = add1((Word16)((vo_L_mult(3, n_1) >> 1)), 1);
+            index = L_shl(quant_1p_N1(posA[0], n_1), tmp);
+            /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1); */
+            index = vo_L_add(index, quant_3p_3N1(posB[0], posB[1], posB[2], n_1));
+            break;
+        case 2:
+            tmp = ((n_1 << 1) + 1);         /* index = quant_2p_2N1(posA[0], posA[1], n_1) << ((2*n_1)+1); */
+            index = L_shl(quant_2p_2N1(posA[0], posA[1], n_1), tmp);
+            /* index += quant_2p_2N1(posB[0], posB[1], n_1); */
+            index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], n_1));
+            break;
+        case 3:
+            /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << N; */
+            index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), N);
+            index = vo_L_add(index, quant_1p_N1(posB[0], n_1));        /* index += quant_1p_N1(posB[0], n_1); */
+            break;
+        case 4:
+            index = quant_4p_4N1(posA[0], posA[1], posA[2], posA[3], n_1);
+            break;
+        default:
+            index = 0;
+            fprintf(stderr, "Error in function quant_4p_4N\n");
+    }
+    tmp = ((N << 2) - 2);               /* index += (i & 3) << ((4*N)-2); */
+    index = vo_L_add(index, L_shl((L_deposit_l(i) & (3L)), tmp));
 
-	return (index);
+    return (index);
 }
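
All of the multi-pulse indexers above begin the same way: each N-bit position is tested against the most significant bit of the track (nb_pos = 1 << (N-1)) and routed into posA (lower half) or posB (upper half); the sub-quantizers are then chosen by how many pulses fell on each side. A minimal sketch of that partition step in plain C, without the saturating basic operators; split_by_half is a hypothetical helper, not part of the codec:

#include <stdio.h>

typedef short Word16;   /* assumed 16-bit type, as in the codec's typedefs */

/* Partition pulse positions by the MSB of an N-bit position field,
 * mirroring the posA/posB split used by quant_4p_4N and friends. */
static void split_by_half(const Word16 pos[], int n, Word16 N,
                          Word16 posA[], int *na, Word16 posB[], int *nb)
{
    Word16 nb_pos = (Word16)(1 << (N - 1)); /* MSB of an N-bit position */
    *na = *nb = 0;
    for (int k = 0; k < n; k++) {
        if ((pos[k] & nb_pos) == 0)
            posA[(*na)++] = pos[k];   /* lower half of the track */
        else
            posB[(*nb)++] = pos[k];   /* upper half of the track */
    }
}

int main(void)
{
    Word16 pos[4] = { 3, 9, 12, 5 };  /* made-up positions, N = 4 */
    Word16 a[4], b[4];
    int na, nb;
    split_by_half(pos, 4, 4, a, &na, b, &nb);
    printf("lower half: %d pulses, upper half: %d pulses\n", na, nb);
    return 0;
}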
 
 
 
 Word32 quant_5p_5N(                        /* (o) return 5*N bits             */
-		Word16 pos[],                         /* (i) position of the pulse 1..5  */
-		Word16 N)                             /* (i) number of bits for position */
+        Word16 pos[],                         /* (i) position of the pulse 1..5  */
+        Word16 N)                             /* (i) number of bits for position */
 {
-	Word16 nb_pos, n_1, tmp;
-	Word16 posA[5], posB[5];
-	Word32 i, j, k, index, tmp2;
+    Word16 nb_pos, n_1, tmp;
+    Word16 posA[5], posB[5];
+    Word32 i, j, k, index, tmp2;
 
-	n_1 = (Word16) (N - 1);
-	nb_pos = (1 << n_1);                  /* nb_pos = (1<<n_1); */
+    n_1 = (Word16) (N - 1);
+    nb_pos = (1 << n_1);                  /* nb_pos = (1<<n_1); */
 
-	i = 0;
-	j = 0;
-	for (k = 0; k < 5; k++)
-	{
-		if ((pos[k] & nb_pos) == 0)
-		{
-			posA[i++] = pos[k];
-		} else
-		{
-			posB[j++] = pos[k];
-		}
-	}
+    i = 0;
+    j = 0;
+    for (k = 0; k < 5; k++)
+    {
+        if ((pos[k] & nb_pos) == 0)
+        {
+            posA[i++] = pos[k];
+        } else
+        {
+            posB[j++] = pos[k];
+        }
+    }
 
-	switch (i)
-	{
-		case 0:
-			tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1);        /* ((5*N)-1)) */
-			index = L_shl(1L, tmp);   /* index = 1 << ((5*N)-1); */
-			tmp = add1((N << 1), 1);  /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) << ((2*N)+1);*/
-			tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
-			index = vo_L_add(index, tmp2);
-			index = vo_L_add(index, quant_2p_2N1(posB[3], posB[4], N));        /* index += quant_2p_2N1(posB[3], posB[4], N); */
-			break;
-		case 1:
-			tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1);        /* index = 1 << ((5*N)-1); */
-			index = L_shl(1L, tmp);
-			tmp = add1((N << 1), 1);   /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) <<((2*N)+1);  */
-			tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
-			index = vo_L_add(index, tmp2);
-			index = vo_L_add(index, quant_2p_2N1(posB[3], posA[0], N));        /* index += quant_2p_2N1(posB[3], posA[0], N); */
-			break;
-		case 2:
-			tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1);        /* ((5*N)-1)) */
-			index = L_shl(1L, tmp);            /* index = 1 << ((5*N)-1); */
-			tmp = add1((N << 1), 1);           /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) << ((2*N)+1);  */
-			tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
-			index = vo_L_add(index, tmp2);
-			index = vo_L_add(index, quant_2p_2N1(posA[0], posA[1], N));        /* index += quant_2p_2N1(posA[0], posA[1], N); */
-			break;
-		case 3:
-			tmp = add1((N << 1), 1);           /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1);  */
-			index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
-			index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], N));        /* index += quant_2p_2N1(posB[0], posB[1], N); */
-			break;
-		case 4:
-			tmp = add1((N << 1), 1);           /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1);  */
-			index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
-			index = vo_L_add(index, quant_2p_2N1(posA[3], posB[0], N));        /* index += quant_2p_2N1(posA[3], posB[0], N); */
-			break;
-		case 5:
-			tmp = add1((N << 1), 1);           /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1);  */
-			index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
-			index = vo_L_add(index, quant_2p_2N1(posA[3], posA[4], N));        /* index += quant_2p_2N1(posA[3], posA[4], N); */
-			break;
-		default:
-			index = 0;
-			fprintf(stderr, "Error in function quant_5p_5N\n");
-	}
+    switch (i)
+    {
+        case 0:
+            tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1);        /* ((5*N)-1) */
+            index = L_shl(1L, tmp);   /* index = 1 << ((5*N)-1); */
+            tmp = add1((N << 1), 1);  /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) << ((2*N)+1);*/
+            tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
+            index = vo_L_add(index, tmp2);
+            index = vo_L_add(index, quant_2p_2N1(posB[3], posB[4], N));        /* index += quant_2p_2N1(posB[3], posB[4], N); */
+            break;
+        case 1:
+            tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1);        /* index = 1 << ((5*N)-1); */
+            index = L_shl(1L, tmp);
+            tmp = add1((N << 1), 1);   /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) <<((2*N)+1);  */
+            tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
+            index = vo_L_add(index, tmp2);
+            index = vo_L_add(index, quant_2p_2N1(posB[3], posA[0], N));        /* index += quant_2p_2N1(posB[3], posA[0], N); */
+            break;
+        case 2:
+            tmp = vo_sub((Word16)((vo_L_mult(5, N) >> 1)), 1);        /* ((5*N)-1) */
+            index = L_shl(1L, tmp);            /* index = 1 << ((5*N)-1); */
+            tmp = add1((N << 1), 1);           /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1) << ((2*N)+1);  */
+            tmp2 = L_shl(quant_3p_3N1(posB[0], posB[1], posB[2], n_1), tmp);
+            index = vo_L_add(index, tmp2);
+            index = vo_L_add(index, quant_2p_2N1(posA[0], posA[1], N));        /* index += quant_2p_2N1(posA[0], posA[1], N); */
+            break;
+        case 3:
+            tmp = add1((N << 1), 1);           /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1);  */
+            index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
+            index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], N));        /* index += quant_2p_2N1(posB[0], posB[1], N); */
+            break;
+        case 4:
+            tmp = add1((N << 1), 1);           /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1);  */
+            index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
+            index = vo_L_add(index, quant_2p_2N1(posA[3], posB[0], N));        /* index += quant_2p_2N1(posA[3], posB[0], N); */
+            break;
+        case 5:
+            tmp = add1((N << 1), 1);           /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((2*N)+1);  */
+            index = L_shl(quant_3p_3N1(posA[0], posA[1], posA[2], n_1), tmp);
+            index = vo_L_add(index, quant_2p_2N1(posA[3], posA[4], N));        /* index += quant_2p_2N1(posA[3], posA[4], N); */
+            break;
+        default:
+            index = 0;
+            fprintf(stderr, "Error in function quant_5p_5N\n");
+    }
 
-	return (index);
+    return (index);
 }
 
 
 Word32 quant_6p_6N_2(                      /* (o) return (6*N)-2 bits         */
-		Word16 pos[],                         /* (i) position of the pulse 1..6  */
-		Word16 N)                             /* (i) number of bits for position */
+        Word16 pos[],                         /* (i) position of the pulse 1..6  */
+        Word16 N)                             /* (i) number of bits for position */
 {
-	Word16 nb_pos, n_1;
-	Word16 posA[6], posB[6];
-	Word32 i, j, k, index;
+    Word16 nb_pos, n_1;
+    Word16 posA[6], posB[6];
+    Word32 i, j, k, index;
 
-	/* !!  N and n_1 are constants -> it doesn't need to be operated by Basic Operators */
-	n_1 = (Word16) (N - 1);
-	nb_pos = (1 << n_1);                  /* nb_pos = (1<<n_1); */
+    /* !!  N and n_1 are constants -> no need to compute them with the basic operators */
+    n_1 = (Word16) (N - 1);
+    nb_pos = (1 << n_1);                  /* nb_pos = (1<<n_1); */
 
-	i = 0;
-	j = 0;
-	for (k = 0; k < 6; k++)
-	{
-		if ((pos[k] & nb_pos) == 0)
-		{
-			posA[i++] = pos[k];
-		} else
-		{
-			posB[j++] = pos[k];
-		}
-	}
+    i = 0;
+    j = 0;
+    for (k = 0; k < 6; k++)
+    {
+        if ((pos[k] & nb_pos) == 0)
+        {
+            posA[i++] = pos[k];
+        } else
+        {
+            posB[j++] = pos[k];
+        }
+    }
 
-	switch (i)
-	{
-		case 0:
-			index = (1 << (Word16) (6 * N - 5));        /* index = 1 << ((6*N)-5); */
-			index = vo_L_add(index, (quant_5p_5N(posB, n_1) << N)); /* index += quant_5p_5N(posB, n_1) << N; */
-			index = vo_L_add(index, quant_1p_N1(posB[5], n_1));        /* index += quant_1p_N1(posB[5], n_1); */
-			break;
-		case 1:
-			index = (1L << (Word16) (6 * N - 5));        /* index = 1 << ((6*N)-5); */
-			index = vo_L_add(index, (quant_5p_5N(posB, n_1) << N)); /* index += quant_5p_5N(posB, n_1) << N; */
-			index = vo_L_add(index, quant_1p_N1(posA[0], n_1));        /* index += quant_1p_N1(posA[0], n_1); */
-			break;
-		case 2:
-			index = (1L << (Word16) (6 * N - 5));        /* index = 1 << ((6*N)-5); */
-			/* index += quant_4p_4N(posB, n_1) << ((2*n_1)+1); */
-			index = vo_L_add(index, (quant_4p_4N(posB, n_1) << (Word16) (2 * n_1 + 1)));
-			index = vo_L_add(index, quant_2p_2N1(posA[0], posA[1], n_1));      /* index += quant_2p_2N1(posA[0], posA[1], n_1); */
-			break;
-		case 3:
-			index = (quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << (Word16) (3 * n_1 + 1));
-			                                  /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((3*n_1)+1); */
-			index =vo_L_add(index, quant_3p_3N1(posB[0], posB[1], posB[2], n_1));
-			                                 /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1); */
-			break;
-		case 4:
-			i = 2;
-			index = (quant_4p_4N(posA, n_1) << (Word16) (2 * n_1 + 1));  /* index = quant_4p_4N(posA, n_1) << ((2*n_1)+1); */
-			index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], n_1));      /* index += quant_2p_2N1(posB[0], posB[1], n_1); */
-			break;
-		case 5:
-			i = 1;
-			index = (quant_5p_5N(posA, n_1) << N);       /* index = quant_5p_5N(posA, n_1) << N; */
-			index = vo_L_add(index, quant_1p_N1(posB[0], n_1));        /* index += quant_1p_N1(posB[0], n_1); */
-			break;
-		case 6:
-			i = 0;
-			index = (quant_5p_5N(posA, n_1) << N);       /* index = quant_5p_5N(posA, n_1) << N; */
-			index = vo_L_add(index, quant_1p_N1(posA[5], n_1));        /* index += quant_1p_N1(posA[5], n_1); */
-			break;
-		default:
-			index = 0;
-			fprintf(stderr, "Error in function quant_6p_6N_2\n");
-	}
-	index = vo_L_add(index, ((L_deposit_l(i) & 3L) << (Word16) (6 * N - 4)));   /* index += (i & 3) << ((6*N)-4); */
+    switch (i)
+    {
+        case 0:
+            index = (1 << (Word16) (6 * N - 5));        /* index = 1 << ((6*N)-5); */
+            index = vo_L_add(index, (quant_5p_5N(posB, n_1) << N)); /* index += quant_5p_5N(posB, n_1) << N; */
+            index = vo_L_add(index, quant_1p_N1(posB[5], n_1));        /* index += quant_1p_N1(posB[5], n_1); */
+            break;
+        case 1:
+            index = (1L << (Word16) (6 * N - 5));        /* index = 1 << ((6*N)-5); */
+            index = vo_L_add(index, (quant_5p_5N(posB, n_1) << N)); /* index += quant_5p_5N(posB, n_1) << N; */
+            index = vo_L_add(index, quant_1p_N1(posA[0], n_1));        /* index += quant_1p_N1(posA[0], n_1); */
+            break;
+        case 2:
+            index = (1L << (Word16) (6 * N - 5));        /* index = 1 << ((6*N)-5); */
+            /* index += quant_4p_4N(posB, n_1) << ((2*n_1)+1); */
+            index = vo_L_add(index, (quant_4p_4N(posB, n_1) << (Word16) (2 * n_1 + 1)));
+            index = vo_L_add(index, quant_2p_2N1(posA[0], posA[1], n_1));      /* index += quant_2p_2N1(posA[0], posA[1], n_1); */
+            break;
+        case 3:
+            index = (quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << (Word16) (3 * n_1 + 1));
+                                              /* index = quant_3p_3N1(posA[0], posA[1], posA[2], n_1) << ((3*n_1)+1); */
+            index = vo_L_add(index, quant_3p_3N1(posB[0], posB[1], posB[2], n_1));
+                                             /* index += quant_3p_3N1(posB[0], posB[1], posB[2], n_1); */
+            break;
+        case 4:
+            i = 2;
+            index = (quant_4p_4N(posA, n_1) << (Word16) (2 * n_1 + 1));  /* index = quant_4p_4N(posA, n_1) << ((2*n_1)+1); */
+            index = vo_L_add(index, quant_2p_2N1(posB[0], posB[1], n_1));      /* index += quant_2p_2N1(posB[0], posB[1], n_1); */
+            break;
+        case 5:
+            i = 1;
+            index = (quant_5p_5N(posA, n_1) << N);       /* index = quant_5p_5N(posA, n_1) << N; */
+            index = vo_L_add(index, quant_1p_N1(posB[0], n_1));        /* index += quant_1p_N1(posB[0], n_1); */
+            break;
+        case 6:
+            i = 0;
+            index = (quant_5p_5N(posA, n_1) << N);       /* index = quant_5p_5N(posA, n_1) << N; */
+            index = vo_L_add(index, quant_1p_N1(posA[5], n_1));        /* index += quant_1p_N1(posA[5], n_1); */
+            break;
+        default:
+            index = 0;
+            fprintf(stderr, "Error in function quant_6p_6N_2\n");
+    }
+    index = vo_L_add(index, ((L_deposit_l(i) & 3L) << (Word16) (6 * N - 4)));   /* index += (i & 3) << ((6*N)-4); */
 
-	return (index);
+    return (index);
 }
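
The tail of each quantizer folds the partition count into the top bits of the index; in quant_6p_6N_2 the case code i (remapped to 0..3 for the symmetric cases 4..6) lands at bit (6*N)-4. A toy illustration with a made-up payload, assuming N = 4 as in the 16-position AMR-WB tracks:

#include <stdio.h>

typedef int Word32;   /* assumed 32-bit, as in the codec's typedefs */

int main(void)
{
    int N = 4;                    /* 16 positions per track */
    Word32 index = 0x1234;        /* hypothetical payload from the sub-quantizers */
    int i = 2;                    /* pulses that fell in the lower half, remapped */

    index += (Word32)(i & 3) << (6 * N - 4);   /* case code in the top 2 bits */
    printf("packed index = 0x%lX, fits in %d bits\n", (long)index, 6 * N - 2);
    return 0;
}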
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/qisf_ns.c b/media/libstagefright/codecs/amrwbenc/src/qisf_ns.c
index fc2f00d..eac98e2 100644
--- a/media/libstagefright/codecs/amrwbenc/src/qisf_ns.c
+++ b/media/libstagefright/codecs/amrwbenc/src/qisf_ns.c
@@ -33,30 +33,30 @@
 *------------------------------------------------------------------*/
 
 void Qisf_ns(
-		Word16 * isf1,                        /* input : ISF in the frequency domain (0..0.5) */
-		Word16 * isf_q,                       /* output: quantized ISF                        */
-		Word16 * indice                       /* output: quantization indices                 */
-	    )
+        Word16 * isf1,                        /* input : ISF in the frequency domain (0..0.5) */
+        Word16 * isf_q,                       /* output: quantized ISF                        */
+        Word16 * indice                       /* output: quantization indices                 */
+        )
 {
-	Word16 i;
-	Word32 tmp;
+    Word16 i;
+    Word32 tmp;
 
-	for (i = 0; i < ORDER; i++)
-	{
-		isf_q[i] = sub(isf1[i], mean_isf_noise[i]);
-	}
+    for (i = 0; i < ORDER; i++)
+    {
+        isf_q[i] = sub(isf1[i], mean_isf_noise[i]);
+    }
 
-	indice[0] = Sub_VQ(&isf_q[0], dico1_isf_noise, 2, SIZE_BK_NOISE1, &tmp);
-	indice[1] = Sub_VQ(&isf_q[2], dico2_isf_noise, 3, SIZE_BK_NOISE2, &tmp);
-	indice[2] = Sub_VQ(&isf_q[5], dico3_isf_noise, 3, SIZE_BK_NOISE3, &tmp);
-	indice[3] = Sub_VQ(&isf_q[8], dico4_isf_noise, 4, SIZE_BK_NOISE4, &tmp);
-	indice[4] = Sub_VQ(&isf_q[12], dico5_isf_noise, 4, SIZE_BK_NOISE5, &tmp);
+    indice[0] = Sub_VQ(&isf_q[0], dico1_isf_noise, 2, SIZE_BK_NOISE1, &tmp);
+    indice[1] = Sub_VQ(&isf_q[2], dico2_isf_noise, 3, SIZE_BK_NOISE2, &tmp);
+    indice[2] = Sub_VQ(&isf_q[5], dico3_isf_noise, 3, SIZE_BK_NOISE3, &tmp);
+    indice[3] = Sub_VQ(&isf_q[8], dico4_isf_noise, 4, SIZE_BK_NOISE4, &tmp);
+    indice[4] = Sub_VQ(&isf_q[12], dico5_isf_noise, 4, SIZE_BK_NOISE5, &tmp);
 
-	/* decoding the ISFs */
+    /* decoding the ISFs */
 
-	Disf_ns(indice, isf_q);
+    Disf_ns(indice, isf_q);
 
-	return;
+    return;
 }
 
 /********************************************************************
@@ -70,41 +70,41 @@
 *********************************************************************/
 
 void Disf_ns(
-		Word16 * indice,                      /* input:  quantization indices                  */
-		Word16 * isf_q                        /* input : ISF in the frequency domain (0..0.5)  */
-	    )
+        Word16 * indice,                      /* input:  quantization indices                  */
+        Word16 * isf_q                        /* input : ISF in the frequency domain (0..0.5)  */
+        )
 {
-	Word16 i;
+    Word16 i;
 
-	for (i = 0; i < 2; i++)
-	{
-		isf_q[i] = dico1_isf_noise[indice[0] * 2 + i];
-	}
-	for (i = 0; i < 3; i++)
-	{
-		isf_q[i + 2] = dico2_isf_noise[indice[1] * 3 + i];
-	}
-	for (i = 0; i < 3; i++)
-	{
-		isf_q[i + 5] = dico3_isf_noise[indice[2] * 3 + i];
-	}
-	for (i = 0; i < 4; i++)
-	{
-		isf_q[i + 8] = dico4_isf_noise[indice[3] * 4 + i];
-	}
-	for (i = 0; i < 4; i++)
-	{
-		isf_q[i + 12] = dico5_isf_noise[indice[4] * 4 + i];
-	}
+    for (i = 0; i < 2; i++)
+    {
+        isf_q[i] = dico1_isf_noise[indice[0] * 2 + i];
+    }
+    for (i = 0; i < 3; i++)
+    {
+        isf_q[i + 2] = dico2_isf_noise[indice[1] * 3 + i];
+    }
+    for (i = 0; i < 3; i++)
+    {
+        isf_q[i + 5] = dico3_isf_noise[indice[2] * 3 + i];
+    }
+    for (i = 0; i < 4; i++)
+    {
+        isf_q[i + 8] = dico4_isf_noise[indice[3] * 4 + i];
+    }
+    for (i = 0; i < 4; i++)
+    {
+        isf_q[i + 12] = dico5_isf_noise[indice[4] * 4 + i];
+    }
 
-	for (i = 0; i < ORDER; i++)
-	{
-		isf_q[i] = add(isf_q[i], mean_isf_noise[i]);
-	}
+    for (i = 0; i < ORDER; i++)
+    {
+        isf_q[i] = add(isf_q[i], mean_isf_noise[i]);
+    }
 
-	Reorder_isf(isf_q, ISF_GAP, ORDER);
+    Reorder_isf(isf_q, ISF_GAP, ORDER);
 
-	return;
+    return;
 }
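
Qisf_ns and Disf_ns implement a split vector quantizer: the 16 ISF residuals are coded as five independent sub-vectors whose offsets and sizes can be read off the Sub_VQ calls above. A quick standalone check that the pieces tile the whole ORDER = 16 vector:

#include <stdio.h>

/* Sub-vector layout transcribed from the Sub_VQ calls in Qisf_ns. */
int main(void)
{
    const int offset[5] = { 0, 2, 5, 8, 12 };
    const int dim[5]    = { 2, 3, 3, 4, 4 };
    int total = 0;
    for (int s = 0; s < 5; s++) {
        printf("codebook %d covers isf_q[%d..%d]\n",
               s + 1, offset[s], offset[s] + dim[s] - 1);
        total += dim[s];
    }
    printf("total coefficients: %d\n", total);  /* = ORDER = 16 */
    return 0;
}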
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/qpisf_2s.c b/media/libstagefright/codecs/amrwbenc/src/qpisf_2s.c
index c711cd0..bec334e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/qpisf_2s.c
+++ b/media/libstagefright/codecs/amrwbenc/src/qpisf_2s.c
@@ -36,13 +36,13 @@
 
 /* private functions */
 static void VQ_stage1(
-		Word16 * x,                           /* input : ISF residual vector           */
-		Word16 * dico,                        /* input : quantization codebook         */
-		Word16 dim,                           /* input : dimention of vector           */
-		Word16 dico_size,                     /* input : size of quantization codebook */
-		Word16 * index,                       /* output: indices of survivors          */
-		Word16 surv                           /* input : number of survivor            */
-		);
+        Word16 * x,                           /* input : ISF residual vector           */
+        Word16 * dico,                        /* input : quantization codebook         */
+        Word16 dim,                           /* input : dimension of vector           */
+        Word16 dico_size,                     /* input : size of quantization codebook */
+        Word16 * index,                       /* output: indices of survivors          */
+        Word16 surv                           /* input : number of survivors           */
+        );
 
 /**************************************************************************
 * Function:   Qpisf_2s_46B()                                              *
@@ -54,84 +54,84 @@
 ***************************************************************************/
 
 void Qpisf_2s_46b(
-		Word16 * isf1,                        /* (i) Q15 : ISF in the frequency domain (0..0.5) */
-		Word16 * isf_q,                       /* (o) Q15 : quantized ISF               (0..0.5) */
-		Word16 * past_isfq,                   /* (io)Q15 : past ISF quantizer                   */
-		Word16 * indice,                      /* (o)     : quantization indices                 */
-		Word16 nb_surv                        /* (i)     : number of survivor (1, 2, 3 or 4)    */
-		)
+        Word16 * isf1,                        /* (i) Q15 : ISF in the frequency domain (0..0.5) */
+        Word16 * isf_q,                       /* (o) Q15 : quantized ISF               (0..0.5) */
+        Word16 * past_isfq,                   /* (io)Q15 : past ISF quantizer                   */
+        Word16 * indice,                      /* (o)     : quantization indices                 */
+        Word16 nb_surv                        /* (i)     : number of survivors (1, 2, 3 or 4)   */
+        )
 {
-	Word16 tmp_ind[5];
-	Word16 surv1[N_SURV_MAX];              /* indices of survivors from 1st stage */
-	Word32 i, k, temp, min_err, distance;
-	Word16 isf[ORDER];
-	Word16 isf_stage2[ORDER];
+    Word16 tmp_ind[5];
+    Word16 surv1[N_SURV_MAX];              /* indices of survivors from 1st stage */
+    Word32 i, k, temp, min_err, distance;
+    Word16 isf[ORDER];
+    Word16 isf_stage2[ORDER];
 
-	for (i = 0; i < ORDER; i++)
-	{
-		isf[i] = vo_sub(isf1[i], mean_isf[i]);
-		isf[i] = vo_sub(isf[i], vo_mult(MU, past_isfq[i]));
-	}
+    for (i = 0; i < ORDER; i++)
+    {
+        isf[i] = vo_sub(isf1[i], mean_isf[i]);
+        isf[i] = vo_sub(isf[i], vo_mult(MU, past_isfq[i]));
+    }
 
-	VQ_stage1(&isf[0], dico1_isf, 9, SIZE_BK1, surv1, nb_surv);
+    VQ_stage1(&isf[0], dico1_isf, 9, SIZE_BK1, surv1, nb_surv);
 
-	distance = MAX_32;
+    distance = MAX_32;
 
-	for (k = 0; k < nb_surv; k++)
-	{
-		for (i = 0; i < 9; i++)
-		{
-			isf_stage2[i] = vo_sub(isf[i], dico1_isf[i + surv1[k] * 9]);
-		}
-		tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico21_isf, 3, SIZE_BK21, &min_err);
-		temp = min_err;
-		tmp_ind[1] = Sub_VQ(&isf_stage2[3], dico22_isf, 3, SIZE_BK22, &min_err);
-		temp = vo_L_add(temp, min_err);
-		tmp_ind[2] = Sub_VQ(&isf_stage2[6], dico23_isf, 3, SIZE_BK23, &min_err);
-		temp = vo_L_add(temp, min_err);
+    for (k = 0; k < nb_surv; k++)
+    {
+        for (i = 0; i < 9; i++)
+        {
+            isf_stage2[i] = vo_sub(isf[i], dico1_isf[i + surv1[k] * 9]);
+        }
+        tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico21_isf, 3, SIZE_BK21, &min_err);
+        temp = min_err;
+        tmp_ind[1] = Sub_VQ(&isf_stage2[3], dico22_isf, 3, SIZE_BK22, &min_err);
+        temp = vo_L_add(temp, min_err);
+        tmp_ind[2] = Sub_VQ(&isf_stage2[6], dico23_isf, 3, SIZE_BK23, &min_err);
+        temp = vo_L_add(temp, min_err);
 
-		if(temp < distance)
-		{
-			distance = temp;
-			indice[0] = surv1[k];
-			for (i = 0; i < 3; i++)
-			{
-				indice[i + 2] = tmp_ind[i];
-			}
-		}
-	}
+        if(temp < distance)
+        {
+            distance = temp;
+            indice[0] = surv1[k];
+            for (i = 0; i < 3; i++)
+            {
+                indice[i + 2] = tmp_ind[i];
+            }
+        }
+    }
 
 
-	VQ_stage1(&isf[9], dico2_isf, 7, SIZE_BK2, surv1, nb_surv);
+    VQ_stage1(&isf[9], dico2_isf, 7, SIZE_BK2, surv1, nb_surv);
 
-	distance = MAX_32;
+    distance = MAX_32;
 
-	for (k = 0; k < nb_surv; k++)
-	{
-		for (i = 0; i < 7; i++)
-		{
-			isf_stage2[i] = vo_sub(isf[9 + i], dico2_isf[i + surv1[k] * 7]);
-		}
+    for (k = 0; k < nb_surv; k++)
+    {
+        for (i = 0; i < 7; i++)
+        {
+            isf_stage2[i] = vo_sub(isf[9 + i], dico2_isf[i + surv1[k] * 7]);
+        }
 
-		tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico24_isf, 3, SIZE_BK24, &min_err);
-		temp = min_err;
-		tmp_ind[1] = Sub_VQ(&isf_stage2[3], dico25_isf, 4, SIZE_BK25, &min_err);
-		temp = vo_L_add(temp, min_err);
+        tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico24_isf, 3, SIZE_BK24, &min_err);
+        temp = min_err;
+        tmp_ind[1] = Sub_VQ(&isf_stage2[3], dico25_isf, 4, SIZE_BK25, &min_err);
+        temp = vo_L_add(temp, min_err);
 
-		if(temp < distance)
-		{
-			distance = temp;
-			indice[1] = surv1[k];
-			for (i = 0; i < 2; i++)
-			{
-				indice[i + 5] = tmp_ind[i];
-			}
-		}
-	}
+        if(temp < distance)
+        {
+            distance = temp;
+            indice[1] = surv1[k];
+            for (i = 0; i < 2; i++)
+            {
+                indice[i + 5] = tmp_ind[i];
+            }
+        }
+    }
 
-	Dpisf_2s_46b(indice, isf_q, past_isfq, isf_q, isf_q, 0, 0);
+    Dpisf_2s_46b(indice, isf_q, past_isfq, isf_q, isf_q, 0, 0);
 
-	return;
+    return;
 }
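
Qpisf_2s_46b is a two-stage multi-survivor search: VQ_stage1 returns nb_surv first-stage candidates, the second-stage codebooks are searched once per candidate, and the combination with the smallest summed distortion wins. A skeleton of that loop in plain C; stage2_err is a stand-in for the Sub_VQ calls and its distortions are made up:

#include <stdio.h>

typedef short Word16;
typedef int   Word32;

/* Hypothetical second-stage distortion, one value per survivor index. */
static Word32 stage2_err(int survivor) { return (survivor * 7 + 3) % 11; }

int main(void)
{
    Word16 surv1[4] = { 5, 2, 9, 1 };   /* made-up stage-1 indices */
    Word32 distance = 0x7FFFFFFF;       /* MAX_32, as in the codec */
    int best = -1;
    for (int k = 0; k < 4; k++) {
        Word32 temp = stage2_err(surv1[k]);
        if (temp < distance) {          /* keep-the-best rule from above */
            distance = temp;
            best = surv1[k];
        }
    }
    printf("best stage-1 index %d, distortion %ld\n", best, (long)distance);
    return 0;
}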
 
 /*****************************************************************************
@@ -144,76 +144,76 @@
 ******************************************************************************/
 
 void Qpisf_2s_36b(
-		Word16 * isf1,                        /* (i) Q15 : ISF in the frequency domain (0..0.5) */
-		Word16 * isf_q,                       /* (o) Q15 : quantized ISF               (0..0.5) */
-		Word16 * past_isfq,                   /* (io)Q15 : past ISF quantizer                   */
-		Word16 * indice,                      /* (o)     : quantization indices                 */
-		Word16 nb_surv                        /* (i)     : number of survivor (1, 2, 3 or 4)    */
-		)
+        Word16 * isf1,                        /* (i) Q15 : ISF in the frequency domain (0..0.5) */
+        Word16 * isf_q,                       /* (o) Q15 : quantized ISF               (0..0.5) */
+        Word16 * past_isfq,                   /* (io)Q15 : past ISF quantizer                   */
+        Word16 * indice,                      /* (o)     : quantization indices                 */
+        Word16 nb_surv                        /* (i)     : number of survivors (1, 2, 3 or 4)   */
+        )
 {
-	Word16 i, k, tmp_ind[5];
-	Word16 surv1[N_SURV_MAX];              /* indices of survivors from 1st stage */
-	Word32 temp, min_err, distance;
-	Word16 isf[ORDER];
-	Word16 isf_stage2[ORDER];
+    Word16 i, k, tmp_ind[5];
+    Word16 surv1[N_SURV_MAX];              /* indices of survivors from 1st stage */
+    Word32 temp, min_err, distance;
+    Word16 isf[ORDER];
+    Word16 isf_stage2[ORDER];
 
-	for (i = 0; i < ORDER; i++)
-	{
-		isf[i] = vo_sub(isf1[i], mean_isf[i]);
-		isf[i] = vo_sub(isf[i], vo_mult(MU, past_isfq[i]));
-	}
+    for (i = 0; i < ORDER; i++)
+    {
+        isf[i] = vo_sub(isf1[i], mean_isf[i]);
+        isf[i] = vo_sub(isf[i], vo_mult(MU, past_isfq[i]));
+    }
 
-	VQ_stage1(&isf[0], dico1_isf, 9, SIZE_BK1, surv1, nb_surv);
+    VQ_stage1(&isf[0], dico1_isf, 9, SIZE_BK1, surv1, nb_surv);
 
-	distance = MAX_32;
+    distance = MAX_32;
 
-	for (k = 0; k < nb_surv; k++)
-	{
-		for (i = 0; i < 9; i++)
-		{
-			isf_stage2[i] = vo_sub(isf[i], dico1_isf[i + surv1[k] * 9]);
-		}
+    for (k = 0; k < nb_surv; k++)
+    {
+        for (i = 0; i < 9; i++)
+        {
+            isf_stage2[i] = vo_sub(isf[i], dico1_isf[i + surv1[k] * 9]);
+        }
 
-		tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico21_isf_36b, 5, SIZE_BK21_36b, &min_err);
-		temp = min_err;
-		tmp_ind[1] = Sub_VQ(&isf_stage2[5], dico22_isf_36b, 4, SIZE_BK22_36b, &min_err);
-		temp = vo_L_add(temp, min_err);
+        tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico21_isf_36b, 5, SIZE_BK21_36b, &min_err);
+        temp = min_err;
+        tmp_ind[1] = Sub_VQ(&isf_stage2[5], dico22_isf_36b, 4, SIZE_BK22_36b, &min_err);
+        temp = vo_L_add(temp, min_err);
 
-		if(temp < distance)
-		{
-			distance = temp;
-			indice[0] = surv1[k];
-			for (i = 0; i < 2; i++)
-			{
-				indice[i + 2] = tmp_ind[i];
-			}
-		}
-	}
+        if(temp < distance)
+        {
+            distance = temp;
+            indice[0] = surv1[k];
+            for (i = 0; i < 2; i++)
+            {
+                indice[i + 2] = tmp_ind[i];
+            }
+        }
+    }
 
-	VQ_stage1(&isf[9], dico2_isf, 7, SIZE_BK2, surv1, nb_surv);
-	distance = MAX_32;
+    VQ_stage1(&isf[9], dico2_isf, 7, SIZE_BK2, surv1, nb_surv);
+    distance = MAX_32;
 
-	for (k = 0; k < nb_surv; k++)
-	{
-		for (i = 0; i < 7; i++)
-		{
-			isf_stage2[i] = vo_sub(isf[9 + i], dico2_isf[i + surv1[k] * 7]);
-		}
+    for (k = 0; k < nb_surv; k++)
+    {
+        for (i = 0; i < 7; i++)
+        {
+            isf_stage2[i] = vo_sub(isf[9 + i], dico2_isf[i + surv1[k] * 7]);
+        }
 
-		tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico23_isf_36b, 7, SIZE_BK23_36b, &min_err);
-		temp = min_err;
+        tmp_ind[0] = Sub_VQ(&isf_stage2[0], dico23_isf_36b, 7, SIZE_BK23_36b, &min_err);
+        temp = min_err;
 
-		if(temp < distance)
-		{
-			distance = temp;
-			indice[1] = surv1[k];
-			indice[4] = tmp_ind[0];
-		}
-	}
+        if(temp < distance)
+        {
+            distance = temp;
+            indice[1] = surv1[k];
+            indice[4] = tmp_ind[0];
+        }
+    }
 
-	Dpisf_2s_36b(indice, isf_q, past_isfq, isf_q, isf_q, 0, 0);
+    Dpisf_2s_36b(indice, isf_q, past_isfq, isf_q, isf_q, 0, 0);
 
-	return;
+    return;
 }
 
 /*********************************************************************
@@ -223,90 +223,90 @@
 **********************************************************************/
 
 void Dpisf_2s_46b(
-		Word16 * indice,                      /* input:  quantization indices                       */
-		Word16 * isf_q,                       /* output: quantized ISF in frequency domain (0..0.5) */
-		Word16 * past_isfq,                   /* i/0   : past ISF quantizer                    */
-		Word16 * isfold,                      /* input : past quantized ISF                    */
-		Word16 * isf_buf,                     /* input : isf buffer                                                        */
-		Word16 bfi,                           /* input : Bad frame indicator                   */
-		Word16 enc_dec
-		)
+        Word16 * indice,                      /* input:  quantization indices                       */
+        Word16 * isf_q,                       /* output: quantized ISF in frequency domain (0..0.5) */
+        Word16 * past_isfq,                   /* i/o   : past ISF quantizer                    */
+        Word16 * isfold,                      /* input : past quantized ISF                    */
+        Word16 * isf_buf,                     /* input : isf buffer                                                        */
+        Word16 bfi,                           /* input : Bad frame indicator                   */
+        Word16 enc_dec
+        )
 {
-	Word16 ref_isf[M], tmp;
-	Word32 i, j, L_tmp;
+    Word16 ref_isf[M], tmp;
+    Word32 i, j, L_tmp;
 
-	if (bfi == 0)                          /* Good frame */
-	{
-		for (i = 0; i < 9; i++)
-		{
-			isf_q[i] = dico1_isf[indice[0] * 9 + i];
-		}
-		for (i = 0; i < 7; i++)
-		{
-			isf_q[i + 9] = dico2_isf[indice[1] * 7 + i];
-		}
+    if (bfi == 0)                          /* Good frame */
+    {
+        for (i = 0; i < 9; i++)
+        {
+            isf_q[i] = dico1_isf[indice[0] * 9 + i];
+        }
+        for (i = 0; i < 7; i++)
+        {
+            isf_q[i + 9] = dico2_isf[indice[1] * 7 + i];
+        }
 
-		for (i = 0; i < 3; i++)
-		{
-			isf_q[i] = add1(isf_q[i], dico21_isf[indice[2] * 3 + i]);
-			isf_q[i + 3] = add1(isf_q[i + 3], dico22_isf[indice[3] * 3 + i]);
-			isf_q[i + 6] = add1(isf_q[i + 6], dico23_isf[indice[4] * 3 + i]);
-			isf_q[i + 9] = add1(isf_q[i + 9], dico24_isf[indice[5] * 3 + i]);
-		}
+        for (i = 0; i < 3; i++)
+        {
+            isf_q[i] = add1(isf_q[i], dico21_isf[indice[2] * 3 + i]);
+            isf_q[i + 3] = add1(isf_q[i + 3], dico22_isf[indice[3] * 3 + i]);
+            isf_q[i + 6] = add1(isf_q[i + 6], dico23_isf[indice[4] * 3 + i]);
+            isf_q[i + 9] = add1(isf_q[i + 9], dico24_isf[indice[5] * 3 + i]);
+        }
 
-		for (i = 0; i < 4; i++)
-		{
-			isf_q[i + 12] = add1(isf_q[i + 12], dico25_isf[indice[6] * 4 + i]);
-		}
+        for (i = 0; i < 4; i++)
+        {
+            isf_q[i + 12] = add1(isf_q[i + 12], dico25_isf[indice[6] * 4 + i]);
+        }
 
-		for (i = 0; i < ORDER; i++)
-		{
-			tmp = isf_q[i];
-			isf_q[i] = add1(tmp, mean_isf[i]);
-			isf_q[i] = add1(isf_q[i], vo_mult(MU, past_isfq[i]));
-			past_isfq[i] = tmp;
-		}
+        for (i = 0; i < ORDER; i++)
+        {
+            tmp = isf_q[i];
+            isf_q[i] = add1(tmp, mean_isf[i]);
+            isf_q[i] = add1(isf_q[i], vo_mult(MU, past_isfq[i]));
+            past_isfq[i] = tmp;
+        }
 
-		if (enc_dec)
-		{
-			for (i = 0; i < M; i++)
-			{
-				for (j = (L_MEANBUF - 1); j > 0; j--)
-				{
-					isf_buf[j * M + i] = isf_buf[(j - 1) * M + i];
-				}
-				isf_buf[i] = isf_q[i];
-			}
-		}
-	} else
-	{                                      /* bad frame */
-		for (i = 0; i < M; i++)
-		{
-			L_tmp = mean_isf[i] << 14;
-			for (j = 0; j < L_MEANBUF; j++)
-			{
-				L_tmp += (isf_buf[j * M + i] << 14);
-			}
-			ref_isf[i] = vo_round(L_tmp);
-		}
+        if (enc_dec)
+        {
+            for (i = 0; i < M; i++)
+            {
+                for (j = (L_MEANBUF - 1); j > 0; j--)
+                {
+                    isf_buf[j * M + i] = isf_buf[(j - 1) * M + i];
+                }
+                isf_buf[i] = isf_q[i];
+            }
+        }
+    } else
+    {                                      /* bad frame */
+        for (i = 0; i < M; i++)
+        {
+            L_tmp = mean_isf[i] << 14;
+            for (j = 0; j < L_MEANBUF; j++)
+            {
+                L_tmp += (isf_buf[j * M + i] << 14);
+            }
+            ref_isf[i] = vo_round(L_tmp);
+        }
 
-		/* use the past ISFs slightly shifted towards their mean */
-		for (i = 0; i < ORDER; i++)
-		{
-			isf_q[i] = add1(vo_mult(ALPHA, isfold[i]), vo_mult(ONE_ALPHA, ref_isf[i]));
-		}
+        /* use the past ISFs slightly shifted towards their mean */
+        for (i = 0; i < ORDER; i++)
+        {
+            isf_q[i] = add1(vo_mult(ALPHA, isfold[i]), vo_mult(ONE_ALPHA, ref_isf[i]));
+        }
 
-		/* estimate past quantized residual to be used in next frame */
-		for (i = 0; i < ORDER; i++)
-		{
-			tmp = add1(ref_isf[i], vo_mult(past_isfq[i], MU));      /* predicted ISF */
-			past_isfq[i] = vo_sub(isf_q[i], tmp);
-			past_isfq[i] = (past_isfq[i] >> 1);        /* past_isfq[i] *= 0.5 */
-		}
-	}
+        /* estimate past quantized residual to be used in next frame */
+        for (i = 0; i < ORDER; i++)
+        {
+            tmp = add1(ref_isf[i], vo_mult(past_isfq[i], MU));      /* predicted ISF */
+            past_isfq[i] = vo_sub(isf_q[i], tmp);
+            past_isfq[i] = (past_isfq[i] >> 1);        /* past_isfq[i] *= 0.5 */
+        }
+    }
 
-	Reorder_isf(isf_q, ISF_GAP, ORDER);
-	return;
+    Reorder_isf(isf_q, ISF_GAP, ORDER);
+    return;
 }
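
The bad-frame branch of Dpisf_2s_46b conceals the loss by pulling the last good ISFs toward a running mean and then halving the predictor memory so the error decays over the following frames. The same rule in floating point; ALPHA = 0.9 and MU = 0.33 are illustrative stand-ins for the codec's Q15 constants:

#include <stdio.h>

int main(void)
{
    const float ALPHA = 0.9f, MU = 0.33f;   /* assumed values, see above */
    float isfold = 0.31f, ref_isf = 0.25f, past_isfq = 0.02f;

    /* isf_q[i] = ALPHA*isfold[i] + (1-ALPHA)*ref_isf[i] */
    float isf_q = ALPHA * isfold + (1.0f - ALPHA) * ref_isf;

    /* past_isfq[i] = (isf_q[i] - (ref_isf[i] + MU*past_isfq[i])) / 2 */
    float predicted = ref_isf + MU * past_isfq;
    past_isfq = 0.5f * (isf_q - predicted);

    printf("concealed isf = %.4f, new residual = %.4f\n", isf_q, past_isfq);
    return 0;
}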
 
 /*********************************************************************
@@ -316,92 +316,92 @@
 *********************************************************************/
 
 void Dpisf_2s_36b(
-		Word16 * indice,                      /* input:  quantization indices                       */
-		Word16 * isf_q,                       /* output: quantized ISF in frequency domain (0..0.5) */
+        Word16 * past_isfq,                   /* i/o   : past ISF quantizer                    */
-		Word16 * isfold,                      /* input : past quantized ISF                    */
-		Word16 * isf_buf,                     /* input : isf buffer                                                        */
-		Word16 bfi,                           /* input : Bad frame indicator                   */
-		Word16 enc_dec
-		)
+        Word16 * indice,                      /* input:  quantization indices                       */
+        Word16 * isf_q,                       /* output: quantized ISF in frequency domain (0..0.5) */
+        Word16 * past_isfq,                   /* i/0   : past ISF quantizer                    */
+        Word16 * isfold,                      /* input : past quantized ISF                    */
+        Word16 * isf_buf,                     /* input : isf buffer                                                        */
+        Word16 bfi,                           /* input : Bad frame indicator                   */
+        Word16 enc_dec
+        )
 {
-	Word16 ref_isf[M], tmp;
-	Word32 i, j, L_tmp;
+    Word16 ref_isf[M], tmp;
+    Word32 i, j, L_tmp;
 
-	if (bfi == 0)                          /* Good frame */
-	{
-		for (i = 0; i < 9; i++)
-		{
-			isf_q[i] = dico1_isf[indice[0] * 9 + i];
-		}
-		for (i = 0; i < 7; i++)
-		{
-			isf_q[i + 9] = dico2_isf[indice[1] * 7 + i];
-		}
+    if (bfi == 0)                          /* Good frame */
+    {
+        for (i = 0; i < 9; i++)
+        {
+            isf_q[i] = dico1_isf[indice[0] * 9 + i];
+        }
+        for (i = 0; i < 7; i++)
+        {
+            isf_q[i + 9] = dico2_isf[indice[1] * 7 + i];
+        }
 
-		for (i = 0; i < 5; i++)
-		{
-			isf_q[i] = add1(isf_q[i], dico21_isf_36b[indice[2] * 5 + i]);
-		}
-		for (i = 0; i < 4; i++)
-		{
-			isf_q[i + 5] = add1(isf_q[i + 5], dico22_isf_36b[indice[3] * 4 + i]);
-		}
-		for (i = 0; i < 7; i++)
-		{
-			isf_q[i + 9] = add1(isf_q[i + 9], dico23_isf_36b[indice[4] * 7 + i]);
-		}
+        for (i = 0; i < 5; i++)
+        {
+            isf_q[i] = add1(isf_q[i], dico21_isf_36b[indice[2] * 5 + i]);
+        }
+        for (i = 0; i < 4; i++)
+        {
+            isf_q[i + 5] = add1(isf_q[i + 5], dico22_isf_36b[indice[3] * 4 + i]);
+        }
+        for (i = 0; i < 7; i++)
+        {
+            isf_q[i + 9] = add1(isf_q[i + 9], dico23_isf_36b[indice[4] * 7 + i]);
+        }
 
-		for (i = 0; i < ORDER; i++)
-		{
-			tmp = isf_q[i];
-			isf_q[i] = add1(tmp, mean_isf[i]);
-			isf_q[i] = add1(isf_q[i], vo_mult(MU, past_isfq[i]));
-			past_isfq[i] = tmp;
-		}
+        for (i = 0; i < ORDER; i++)
+        {
+            tmp = isf_q[i];
+            isf_q[i] = add1(tmp, mean_isf[i]);
+            isf_q[i] = add1(isf_q[i], vo_mult(MU, past_isfq[i]));
+            past_isfq[i] = tmp;
+        }
 
 
-		if (enc_dec)
-		{
-			for (i = 0; i < M; i++)
-			{
-				for (j = (L_MEANBUF - 1); j > 0; j--)
-				{
-					isf_buf[j * M + i] = isf_buf[(j - 1) * M + i];
-				}
-				isf_buf[i] = isf_q[i];
-			}
-		}
-	} else
-	{                                      /* bad frame */
-		for (i = 0; i < M; i++)
-		{
-			L_tmp = (mean_isf[i] << 14);
-			for (j = 0; j < L_MEANBUF; j++)
-			{
-				L_tmp += (isf_buf[j * M + i] << 14);
-			}
-			ref_isf[i] = vo_round(L_tmp);
-		}
+        if (enc_dec)
+        {
+            for (i = 0; i < M; i++)
+            {
+                for (j = (L_MEANBUF - 1); j > 0; j--)
+                {
+                    isf_buf[j * M + i] = isf_buf[(j - 1) * M + i];
+                }
+                isf_buf[i] = isf_q[i];
+            }
+        }
+    } else
+    {                                      /* bad frame */
+        for (i = 0; i < M; i++)
+        {
+            L_tmp = (mean_isf[i] << 14);
+            for (j = 0; j < L_MEANBUF; j++)
+            {
+                L_tmp += (isf_buf[j * M + i] << 14);
+            }
+            ref_isf[i] = vo_round(L_tmp);
+        }
 
-		/* use the past ISFs slightly shifted towards their mean */
-		for (i = 0; i < ORDER; i++)
-		{
-			isf_q[i] = add1(vo_mult(ALPHA, isfold[i]), vo_mult(ONE_ALPHA, ref_isf[i]));
-		}
+        /* use the past ISFs slightly shifted towards their mean */
+        for (i = 0; i < ORDER; i++)
+        {
+            isf_q[i] = add1(vo_mult(ALPHA, isfold[i]), vo_mult(ONE_ALPHA, ref_isf[i]));
+        }
 
-		/* estimate past quantized residual to be used in next frame */
-		for (i = 0; i < ORDER; i++)
-		{
-			tmp = add1(ref_isf[i], vo_mult(past_isfq[i], MU));      /* predicted ISF */
-			past_isfq[i] = vo_sub(isf_q[i], tmp);
-			past_isfq[i] = past_isfq[i] >> 1;         /* past_isfq[i] *= 0.5 */
-		}
-	}
+        /* estimate past quantized residual to be used in next frame */
+        for (i = 0; i < ORDER; i++)
+        {
+            tmp = add1(ref_isf[i], vo_mult(past_isfq[i], MU));      /* predicted ISF */
+            past_isfq[i] = vo_sub(isf_q[i], tmp);
+            past_isfq[i] = past_isfq[i] >> 1;         /* past_isfq[i] *= 0.5 */
+        }
+    }
 
-	Reorder_isf(isf_q, ISF_GAP, ORDER);
+    Reorder_isf(isf_q, ISF_GAP, ORDER);
 
-	return;
+    return;
 }
 
 
@@ -419,122 +419,122 @@
 ****************************************************************************/
 
 void Reorder_isf(
-		Word16 * isf,                         /* (i/o) Q15: ISF in the frequency domain (0..0.5) */
-		Word16 min_dist,                      /* (i) Q15  : minimum distance to keep             */
-		Word16 n                              /* (i)      : number of ISF                        */
-		)
+        Word16 * isf,                         /* (i/o) Q15: ISF in the frequency domain (0..0.5) */
+        Word16 min_dist,                      /* (i) Q15  : minimum distance to keep             */
+        Word16 n                              /* (i)      : number of ISF                        */
+        )
 {
-	Word32 i;
-	Word16 isf_min;
+    Word32 i;
+    Word16 isf_min;
 
-	isf_min = min_dist;
-	for (i = 0; i < n - 1; i++)
-	{
-		if(isf[i] < isf_min)
-		{
-			isf[i] = isf_min;
-		}
-		isf_min = (isf[i] + min_dist);
-	}
-	return;
+    isf_min = min_dist;
+    for (i = 0; i < n - 1; i++)
+    {
+        if(isf[i] < isf_min)
+        {
+            isf[i] = isf_min;
+        }
+        isf_min = (isf[i] + min_dist);
+    }
+    return;
 }
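
Reorder_isf is a single left-to-right sweep that enforces a minimum spacing: each coefficient is pushed up to at least min_dist above its (possibly already adjusted) predecessor, and the last coefficient is left untouched. A standalone run on made-up Q15-style values:

#include <stdio.h>

typedef short Word16;

int main(void)
{
    Word16 isf[5] = { 100, 90, 250, 255, 400 };
    Word16 min_dist = 50, isf_min = min_dist;
    for (int i = 0; i < 5 - 1; i++) {   /* same sweep as Reorder_isf */
        if (isf[i] < isf_min)
            isf[i] = isf_min;
        isf_min = (Word16)(isf[i] + min_dist);
    }
    for (int i = 0; i < 5; i++)
        printf("%d ", isf[i]);
    printf("\n");                       /* prints: 100 150 250 300 400 */
    return 0;
}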
 
 
 Word16 Sub_VQ(                             /* output: return quantization index     */
-		Word16 * x,                           /* input : ISF residual vector           */
-		Word16 * dico,                        /* input : quantization codebook         */
-		Word16 dim,                           /* input : dimention of vector           */
-		Word16 dico_size,                     /* input : size of quantization codebook */
-		Word32 * distance                     /* output: error of quantization         */
-	     )
+        Word16 * x,                           /* input : ISF residual vector           */
+        Word16 * dico,                        /* input : quantization codebook         */
+        Word16 dim,                           /* input : dimension of vector           */
+        Word16 dico_size,                     /* input : size of quantization codebook */
+        Word32 * distance                     /* output: error of quantization         */
+        )
 {
-	Word16 temp, *p_dico;
-	Word32 i, j, index;
-	Word32 dist_min, dist;
+    Word16 temp, *p_dico;
+    Word32 i, j, index;
+    Word32 dist_min, dist;
 
-	dist_min = MAX_32;
-	p_dico = dico;
+    dist_min = MAX_32;
+    p_dico = dico;
 
-	index = 0;
-	for (i = 0; i < dico_size; i++)
-	{
-		dist = 0;
+    index = 0;
+    for (i = 0; i < dico_size; i++)
+    {
+        dist = 0;
 
-		for (j = 0; j < dim; j++)
-		{
-			temp = x[j] - (*p_dico++);
-			dist += (temp * temp)<<1;
-		}
+        for (j = 0; j < dim; j++)
+        {
+            temp = x[j] - (*p_dico++);
+            dist += (temp * temp)<<1;
+        }
 
-		if(dist < dist_min)
-		{
-			dist_min = dist;
-			index = i;
-		}
-	}
+        if(dist < dist_min)
+        {
+            dist_min = dist;
+            index = i;
+        }
+    }
 
-	*distance = dist_min;
+    *distance = dist_min;
 
-	/* Reading the selected vector */
-	p_dico = &dico[index * dim];
-	for (j = 0; j < dim; j++)
-	{
-		x[j] = *p_dico++;
-	}
+    /* Reading the selected vector */
+    p_dico = &dico[index * dim];
+    for (j = 0; j < dim; j++)
+    {
+        x[j] = *p_dico++;
+    }
 
-	return index;
+    return index;
 }
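
Sub_VQ is an exhaustive nearest-neighbour search over the codebook, after which the input residual is overwritten with the winning codevector. A plain-C restatement of the search (the codec doubles the squared error with the <<1, which only scales the comparison):

#include <stdio.h>

typedef short Word16;
typedef int   Word32;

static int nearest(const Word16 *x, const Word16 *dico,
                   int dim, int dico_size, Word32 *distance)
{
    Word32 dist_min = 0x7FFFFFFF;   /* MAX_32 */
    int index = 0;
    for (int i = 0; i < dico_size; i++) {
        Word32 dist = 0;
        for (int j = 0; j < dim; j++) {
            Word32 d = x[j] - dico[i * dim + j];
            dist += d * d;          /* squared Euclidean distance */
        }
        if (dist < dist_min) { dist_min = dist; index = i; }
    }
    *distance = dist_min;
    return index;
}

int main(void)
{
    Word16 x[2] = { 3, -1 };
    Word16 dico[6] = { 0, 0,  4, -2,  -5, 7 };  /* 3 codevectors, dim 2 */
    Word32 d;
    int idx = nearest(x, dico, 2, 3, &d);
    printf("nearest codevector %d, squared error %ld\n", idx, (long)d);
    return 0;
}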
 
 
 static void VQ_stage1(
-		Word16 * x,                           /* input : ISF residual vector           */
-		Word16 * dico,                        /* input : quantization codebook         */
-		Word16 dim,                           /* input : dimention of vector           */
-		Word16 dico_size,                     /* input : size of quantization codebook */
-		Word16 * index,                       /* output: indices of survivors          */
-		Word16 surv                           /* input : number of survivor            */
-		)
+        Word16 * x,                           /* input : ISF residual vector           */
+        Word16 * dico,                        /* input : quantization codebook         */
+        Word16 dim,                           /* input : dimension of vector           */
+        Word16 dico_size,                     /* input : size of quantization codebook */
+        Word16 * index,                       /* output: indices of survivors          */
+        Word16 surv                           /* input : number of survivors           */
+        )
 {
-	Word16 temp, *p_dico;
-	Word32 i, j, k, l;
-	Word32 dist_min[N_SURV_MAX], dist;
+    Word16 temp, *p_dico;
+    Word32 i, j, k, l;
+    Word32 dist_min[N_SURV_MAX], dist;
 
-	dist_min[0] = MAX_32;
-	dist_min[1] = MAX_32;
-	dist_min[2] = MAX_32;
-	dist_min[3] = MAX_32;
-	index[0] = 0;
-	index[1] = 1;
-	index[2] = 2;
-	index[3] = 3;
+    dist_min[0] = MAX_32;
+    dist_min[1] = MAX_32;
+    dist_min[2] = MAX_32;
+    dist_min[3] = MAX_32;
+    index[0] = 0;
+    index[1] = 1;
+    index[2] = 2;
+    index[3] = 3;
 
-	p_dico = dico;
+    p_dico = dico;
 
-	for (i = 0; i < dico_size; i++)
-	{
-		dist = 0;
-		for (j = 0; j < dim; j++)
-		{
-			temp = x[j] -  (*p_dico++);
-			dist += (temp * temp)<<1;
-		}
+    for (i = 0; i < dico_size; i++)
+    {
+        dist = 0;
+        for (j = 0; j < dim; j++)
+        {
+            temp = x[j] -  (*p_dico++);
+            dist += (temp * temp)<<1;
+        }
 
-		for (k = 0; k < surv; k++)
-		{
-			if(dist < dist_min[k])
-			{
-				for (l = surv - 1; l > k; l--)
-				{
-					dist_min[l] = dist_min[l - 1];
-					index[l] = index[l - 1];
-				}
-				dist_min[k] = dist;
-				index[k] = i;
-				break;
-			}
-		}
-	}
-	return;
+        for (k = 0; k < surv; k++)
+        {
+            if(dist < dist_min[k])
+            {
+                for (l = surv - 1; l > k; l--)
+                {
+                    dist_min[l] = dist_min[l - 1];
+                    index[l] = index[l - 1];
+                }
+                dist_min[k] = dist;
+                index[k] = i;
+                break;
+            }
+        }
+    }
+    return;
 }
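
VQ_stage1 differs from Sub_VQ only in its bookkeeping: instead of a single minimum, it keeps a small sorted list of the surv best distortions by shifting weaker entries down on each insertion. The same pattern in isolation, driven by a fixed stream of made-up distortions:

#include <stdio.h>

int main(void)
{
    long dist_min[4] = { 1L << 30, 1L << 30, 1L << 30, 1L << 30 };
    int  index[4]    = { 0, 1, 2, 3 };
    long stream[6]   = { 40, 10, 70, 5, 30, 10 };   /* fake distortions */
    for (int i = 0; i < 6; i++) {
        for (int k = 0; k < 4; k++) {
            if (stream[i] < dist_min[k]) {          /* insert, shift the rest down */
                for (int l = 3; l > k; l--) {
                    dist_min[l] = dist_min[l - 1];
                    index[l]    = index[l - 1];
                }
                dist_min[k] = stream[i];
                index[k]    = i;
                break;
            }
        }
    }
    for (int k = 0; k < 4; k++)
        printf("survivor %d: codevector %d (dist %ld)\n", k, index[k], dist_min[k]);
    return 0;
}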
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/random.c b/media/libstagefright/codecs/amrwbenc/src/random.c
index b896863..758343c 100644
--- a/media/libstagefright/codecs/amrwbenc/src/random.c
+++ b/media/libstagefright/codecs/amrwbenc/src/random.c
@@ -26,8 +26,8 @@
 
 Word16 Random(Word16 * seed)
 {
-	/* static Word16 seed = 21845; */
-	*seed = (Word16)(L_add((L_mult(*seed, 31821) >> 1), 13849L));
-	return (*seed);
+    /* static Word16 seed = 21845; */
+    *seed = (Word16)(L_add((L_mult(*seed, 31821) >> 1), 13849L));
+    return (*seed);
 }
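
Random() is the classical 16-bit linear congruential generator seed = seed*31821 + 13849 kept in a Word16, so it wraps modulo 2^16; the detour through L_mult and the >>1 recovers the plain 32-bit product before truncation, since saturation can never trigger for these constants. A plain-C equivalent (the Word16 cast is assumed to wrap, as it does on the platforms this codec targets):

#include <stdio.h>

typedef short Word16;

int main(void)
{
    Word16 seed = 21845;    /* historical default from the comment above */
    for (int i = 0; i < 4; i++) {
        /* product stays below 2^31, so the int arithmetic is exact */
        seed = (Word16)(seed * 31821 + 13849);
        printf("%d\n", seed);
    }
    return 0;
}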
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/residu.c b/media/libstagefright/codecs/amrwbenc/src/residu.c
index b0c04b5..76d0e41 100644
--- a/media/libstagefright/codecs/amrwbenc/src/residu.c
+++ b/media/libstagefright/codecs/amrwbenc/src/residu.c
@@ -26,41 +26,41 @@
 #include "basic_op.h"
 
 void Residu(
-		Word16 a[],                           /* (i) Q12 : prediction coefficients                     */
-		Word16 x[],                           /* (i)     : speech (values x[-m..-1] are needed         */
-		Word16 y[],                           /* (o) x2  : residual signal                             */
-		Word16 lg                             /* (i)     : size of filtering                           */
-		)
+        Word16 a[],                           /* (i) Q12 : prediction coefficients                     */
+        Word16 x[],                           /* (i)     : speech (values x[-m..-1] are needed)        */
+        Word16 y[],                           /* (o) x2  : residual signal                             */
+        Word16 lg                             /* (i)     : size of filtering                           */
+        )
 {
-	Word16 i,*p1, *p2;
-	Word32 s;
-	for (i = 0; i < lg; i++)
-	{
-		p1 = a;
-		p2 = &x[i];
-		s  = vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1++), (*p2--));
-		s += vo_mult32((*p1), (*p2));
+    Word16 i, *p1, *p2;
+    Word32 s;
+    for (i = 0; i < lg; i++)
+    {
+        p1 = a;
+        p2 = &x[i];
+        s  = vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1++), (*p2--));
+        s += vo_mult32((*p1), (*p2));
 
-		s = L_shl2(s, 5);
-		y[i] = extract_h(L_add(s, 0x8000));
-	}
+        s = L_shl2(s, 5);
+        y[i] = extract_h(L_add(s, 0x8000));
+    }
 
-	return;
+    return;
 }
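
The unrolled pointer walk in Residu() is a 17-tap FIR: y[i] is the Q12 prediction filter A(z) applied to the speech, reading M samples of history before x[0]. The generic form in floating point, with a one-tap predictor as a worked example:

#include <stdio.h>

#define M 16   /* LP order, as in the codec */

static void residual(const float a[M + 1], const float *x, float y[], int lg)
{
    for (int i = 0; i < lg; i++) {
        float s = 0.0f;
        for (int j = 0; j <= M; j++)
            s += a[j] * x[i - j];   /* x must have M history samples before x[0] */
        y[i] = s;
    }
}

int main(void)
{
    float a[M + 1] = { 1.0f, -0.9f };   /* remaining taps zero */
    float buf[M + 4] = { 0 };           /* M zeroed history samples + 4 new */
    float *x = buf + M, y[4];
    x[0] = 1; x[1] = 1; x[2] = 1; x[3] = 1;
    residual(a, x, y, 4);
    printf("%.1f %.1f %.1f %.1f\n", y[0], y[1], y[2], y[3]);  /* 1.0 0.1 0.1 0.1 */
    return 0;
}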
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/scale.c b/media/libstagefright/codecs/amrwbenc/src/scale.c
index 418cc06..21458c8 100644
--- a/media/libstagefright/codecs/amrwbenc/src/scale.c
+++ b/media/libstagefright/codecs/amrwbenc/src/scale.c
@@ -25,32 +25,32 @@
 #include "basic_op.h"
 
 void Scale_sig(
-		Word16 x[],                           /* (i/o) : signal to scale               */
-		Word16 lg,                            /* (i)   : size of x[]                   */
-		Word16 exp                            /* (i)   : exponent: x = round(x << exp) */
-	      )
+        Word16 x[],                           /* (i/o) : signal to scale               */
+        Word16 lg,                            /* (i)   : size of x[]                   */
+        Word16 exp                            /* (i)   : exponent: x = round(x << exp) */
+        )
 {
-	Word32 i;
-	Word32 L_tmp;
-	if(exp > 0)
-	{
-		for (i = lg - 1 ; i >= 0; i--)
-		{
-			L_tmp = L_shl2(x[i], 16 + exp);
-			x[i] = extract_h(L_add(L_tmp, 0x8000));
-		}
-	}
-	else
-	{
-		exp = -exp;
-		for (i = lg - 1; i >= 0; i--)
-		{
-			L_tmp = x[i] << 16;
-			L_tmp >>= exp;
-			x[i] = (L_tmp + 0x8000)>>16;
-		}
-	}
-	return;
+    Word32 i;
+    Word32 L_tmp;
+    if(exp > 0)
+    {
+        for (i = lg - 1 ; i >= 0; i--)
+        {
+            L_tmp = L_shl2(x[i], 16 + exp);
+            x[i] = extract_h(L_add(L_tmp, 0x8000));
+        }
+    }
+    else
+    {
+        exp = -exp;
+        for (i = lg - 1; i >= 0; i--)
+        {
+            L_tmp = x[i] << 16;
+            L_tmp >>= exp;
+            x[i] = (L_tmp + 0x8000)>>16;
+        }
+    }
+    return;
 }
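
For a negative exponent, Scale_sig moves each sample into the high half of a 32-bit word, shifts it down, and rounds by adding 0x8000 before truncating back to 16 bits, giving round-to-nearest rather than plain truncation. The down-scaling path in isolation:

#include <stdio.h>

typedef short Word16;
typedef int   Word32;

int main(void)
{
    Word16 x = 12345;
    Word16 exp = -3;                 /* scale by 2^-3 */
    Word32 L_tmp = (Word32)x << 16;  /* sample in the high half */
    L_tmp >>= -exp;
    x = (Word16)((L_tmp + 0x8000) >> 16);
    printf("%d\n", x);               /* round(12345 / 8) = 1543 */
    return 0;
}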
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/stream.c b/media/libstagefright/codecs/amrwbenc/src/stream.c
index 780f009..a39149e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/stream.c
+++ b/media/libstagefright/codecs/amrwbenc/src/stream.c
@@ -25,34 +25,34 @@
 
 void voAWB_InitFrameBuffer(FrameStream *stream)
 {
-	stream->set_ptr = NULL;
-	stream->frame_ptr_bk = stream->frame_ptr;
-	stream->set_len = 0;
-	stream->framebuffer_len = 0;
-	stream->frame_storelen = 0;
+    stream->set_ptr = NULL;
+    stream->frame_ptr_bk = stream->frame_ptr;
+    stream->set_len = 0;
+    stream->framebuffer_len = 0;
+    stream->frame_storelen = 0;
 }
 
 void voAWB_UpdateFrameBuffer(
-		FrameStream *stream,
-		VO_MEM_OPERATOR *pMemOP
-		)
+        FrameStream *stream,
+        VO_MEM_OPERATOR *pMemOP
+        )
 {
-	int  len;
-	len  = MIN(Frame_Maxsize - stream->frame_storelen, stream->set_len);
-	pMemOP->Copy(VO_INDEX_ENC_AMRWB, stream->frame_ptr_bk + stream->frame_storelen , stream->set_ptr, len);
-	stream->set_len -= len;
-	stream->set_ptr += len;
-	stream->framebuffer_len = stream->frame_storelen + len;
-	stream->frame_ptr = stream->frame_ptr_bk;
-	stream->used_len += len;
+    int  len;
+    len  = MIN(Frame_Maxsize - stream->frame_storelen, stream->set_len);
+    pMemOP->Copy(VO_INDEX_ENC_AMRWB, stream->frame_ptr_bk + stream->frame_storelen, stream->set_ptr, len);
+    stream->set_len -= len;
+    stream->set_ptr += len;
+    stream->framebuffer_len = stream->frame_storelen + len;
+    stream->frame_ptr = stream->frame_ptr_bk;
+    stream->used_len += len;
 }
 
 void voAWB_FlushFrameBuffer(FrameStream *stream)
 {
-	stream->set_ptr = NULL;
-	stream->frame_ptr_bk = stream->frame_ptr;
-	stream->set_len = 0;
-	stream->framebuffer_len = 0;
-	stream->frame_storelen = 0;
+    stream->set_ptr = NULL;
+    stream->frame_ptr_bk = stream->frame_ptr;
+    stream->set_len = 0;
+    stream->framebuffer_len = 0;
+    stream->frame_storelen = 0;
 }
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/syn_filt.c b/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
index 961aadc..7eba12f 100644
--- a/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
+++ b/media/libstagefright/codecs/amrwbenc/src/syn_filt.c
@@ -29,134 +29,134 @@
 #define UNUSED(x) (void)(x)
 
 void Syn_filt(
-		Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients           */
-		Word16 x[],                           /* (i)     : input signal                             */
-		Word16 y[],                           /* (o)     : output signal                            */
-		Word16 lg,                            /* (i)     : size of filtering                        */
-		Word16 mem[],                         /* (i/o)   : memory associated with this filtering.   */
-		Word16 update                         /* (i)     : 0=no update, 1=update of memory.         */
-	     )
+        Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients           */
+        Word16 x[],                           /* (i)     : input signal                             */
+        Word16 y[],                           /* (o)     : output signal                            */
+        Word16 lg,                            /* (i)     : size of filtering                        */
+        Word16 mem[],                         /* (i/o)   : memory associated with this filtering.   */
+        Word16 update                         /* (i)     : 0=no update, 1=update of memory.         */
+        )
 {
-	Word32 i, a0;
-	Word16 y_buf[L_SUBFR16k + M16k];
-	Word32 L_tmp;
-	Word16 *yy, *p1, *p2;
-	yy = &y_buf[0];
-	/* copy initial filter states into synthesis buffer */
-	for (i = 0; i < 16; i++)
-	{
-		*yy++ = mem[i];
-	}
-	a0 = (a[0] >> 1);                     /* input / 2 */
-	/* Do the filtering. */
-	for (i = 0; i < lg; i++)
-	{
-		p1 = &a[1];
-		p2 = &yy[i-1];
-		L_tmp  = vo_mult32(a0, x[i]);
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1++), (*p2--));
-		L_tmp -= vo_mult32((*p1), (*p2));
+    Word32 i, a0;
+    Word16 y_buf[L_SUBFR16k + M16k];
+    Word32 L_tmp;
+    Word16 *yy, *p1, *p2;
+    yy = &y_buf[0];
+    /* copy initial filter states into synthesis buffer */
+    for (i = 0; i < 16; i++)
+    {
+        *yy++ = mem[i];
+    }
+    a0 = (a[0] >> 1);                     /* input / 2 */
+    /* Do the filtering. */
+    for (i = 0; i < lg; i++)
+    {
+        p1 = &a[1];
+        p2 = &yy[i-1];
+        L_tmp  = vo_mult32(a0, x[i]);
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1++), (*p2--));
+        L_tmp -= vo_mult32((*p1), (*p2));
 
-		L_tmp = L_shl2(L_tmp, 4);
-		y[i] = yy[i] = extract_h(L_add(L_tmp, 0x8000));
-	}
-	/* Update memory if required */
-	if (update)
-		for (i = 0; i < 16; i++)
-		{
-			mem[i] = yy[lg - 16 + i];
-		}
-	return;
+        L_tmp = L_shl2(L_tmp, 4);
+        y[i] = yy[i] = extract_h(L_add(L_tmp, 0x8000));
+    }
+    /* Update memory if required */
+    if (update)
+        for (i = 0; i < 16; i++)
+        {
+            mem[i] = yy[lg - 16 + i];
+        }
+    return;
 }
 
 
 void Syn_filt_32(
-		Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients */
-		Word16 m,                             /* (i)     : order of LP filter             */
-		Word16 exc[],                         /* (i) Qnew: excitation (exc[i] >> Qnew)    */
-		Word16 Qnew,                          /* (i)     : exc scaling = 0(min) to 8(max) */
-		Word16 sig_hi[],                      /* (o) /16 : synthesis high                 */
-		Word16 sig_lo[],                      /* (o) /16 : synthesis low                  */
-		Word16 lg                             /* (i)     : size of filtering              */
-		)
+        Word16 a[],                           /* (i) Q12 : a[m+1] prediction coefficients */
+        Word16 m,                             /* (i)     : order of LP filter             */
+        Word16 exc[],                         /* (i) Qnew: excitation (exc[i] >> Qnew)    */
+        Word16 Qnew,                          /* (i)     : exc scaling = 0(min) to 8(max) */
+        Word16 sig_hi[],                      /* (o) /16 : synthesis high                 */
+        Word16 sig_lo[],                      /* (o) /16 : synthesis low                  */
+        Word16 lg                             /* (i)     : size of filtering              */
+        )
 {
-	Word32 i,a0;
-	Word32 L_tmp, L_tmp1;
-	Word16 *p1, *p2, *p3;
+    Word32 i,a0;
+    Word32 L_tmp, L_tmp1;
+    Word16 *p1, *p2, *p3;
         UNUSED(m);
 
-	a0 = a[0] >> (4 + Qnew);          /* input / 16 and >>Qnew */
-	/* Do the filtering. */
-	for (i = 0; i < lg; i++)
-	{
-		L_tmp  = 0;
-		L_tmp1 = 0;
-		p1 = a;
-		p2 = &sig_lo[i - 1];
-		p3 = &sig_hi[i - 1];
+    a0 = a[0] >> (4 + Qnew);          /* input / 16 and >>Qnew */
+    /* Do the filtering. */
+    for (i = 0; i < lg; i++)
+    {
+        L_tmp  = 0;
+        L_tmp1 = 0;
+        p1 = a;
+        p2 = &sig_lo[i - 1];
+        p3 = &sig_hi[i - 1];
 
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
-		L_tmp  -= vo_mult32((*p2--), (*p1));
-		L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
+        L_tmp  -= vo_mult32((*p2--), (*p1));
+        L_tmp1 -= vo_mult32((*p3--), (*p1++));
 
-		L_tmp = L_tmp >> 11;
-		L_tmp += vo_L_mult(exc[i], a0);
+        L_tmp = L_tmp >> 11;
+        L_tmp += vo_L_mult(exc[i], a0);
 
-		/* sig_hi = bit16 to bit31 of synthesis */
-		L_tmp = L_tmp - (L_tmp1<<1);
+        /* sig_hi = bit16 to bit31 of synthesis */
+        L_tmp = L_tmp - (L_tmp1<<1);
 
-		L_tmp = L_tmp >> 3;           /* ai in Q12 */
-		sig_hi[i] = extract_h(L_tmp);
+        L_tmp = L_tmp >> 3;           /* ai in Q12 */
+        sig_hi[i] = extract_h(L_tmp);
 
-		/* sig_lo = bit4 to bit15 of synthesis */
-		L_tmp >>= 4;           /* 4 : sig_lo[i] >> 4 */
-		sig_lo[i] = (Word16)((L_tmp - (sig_hi[i] << 13)));
-	}
+        /* sig_lo = bit4 to bit15 of synthesis */
+        L_tmp >>= 4;           /* 4 : sig_lo[i] >> 4 */
+        sig_lo[i] = (Word16)((L_tmp - (sig_hi[i] << 13)));
+    }
 
-	return;
+    return;
 }
 
 
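The sixteen unrolled vo_mult32 taps in Syn_filt above are just a 16th-order direct-form recursion, y[i] = (a[0]/2)*x[i] - sum_{k=1..16} a[k]*y[i-k], rescaled out of Q12. A compact loop-based sketch of the same computation, assuming the Word16/Word32 types and the vo_mult32/L_shl2/L_add/extract_h helpers used above (the shipping code keeps the manual unroll for speed):

static void Syn_filt_sketch(
        Word16 a[],        /* (i) Q12 : a[m+1] prediction coefficients */
        Word16 x[],        /* (i)     : input signal                   */
        Word16 y[],        /* (o)     : output signal                  */
        Word16 lg,         /* (i)     : size of filtering              */
        Word16 mem[],      /* (i/o)   : 16-word filter memory          */
        Word16 update)     /* (i)     : 0=no update, 1=update memory   */
{
    Word16 y_buf[L_SUBFR16k + M16k];
    Word32 i, k, L_tmp;
    Word32 a0 = a[0] >> 1;                     /* input / 2 */

    for (i = 0; i < 16; i++)                   /* preload filter state */
        y_buf[i] = mem[i];
    for (i = 0; i < lg; i++)
    {
        L_tmp = vo_mult32(a0, x[i]);           /* feed-forward term */
        for (k = 1; k <= 16; k++)              /* the 16 unrolled taps */
            L_tmp -= vo_mult32(a[k], y_buf[16 + i - k]);
        L_tmp = L_shl2(L_tmp, 4);              /* undo Q12 scaling */
        y[i] = y_buf[16 + i] = extract_h(L_add(L_tmp, 0x8000));  /* round */
    }
    if (update)                                /* carry state to next call */
        for (i = 0; i < 16; i++)
            mem[i] = y_buf[lg + i];
}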
diff --git a/media/libstagefright/codecs/amrwbenc/src/updt_tar.c b/media/libstagefright/codecs/amrwbenc/src/updt_tar.c
index 96779fd..ba7c2ff 100644
--- a/media/libstagefright/codecs/amrwbenc/src/updt_tar.c
+++ b/media/libstagefright/codecs/amrwbenc/src/updt_tar.c
@@ -25,24 +25,25 @@
 #include "basic_op.h"
 
 void Updt_tar(
-		Word16 * x,                           /* (i) Q0  : old target (for pitch search)     */
-		Word16 * x2,                          /* (o) Q0  : new target (for codebook search)  */
-		Word16 * y,                           /* (i) Q0  : filtered adaptive codebook vector */
-		Word16 gain,                          /* (i) Q14 : adaptive codebook gain            */
-		Word16 L                              /* (i)     : subframe size                     */
-	     )
+        Word16 * x,                           /* (i) Q0  : old target (for pitch search)     */
+        Word16 * x2,                          /* (o) Q0  : new target (for codebook search)  */
+        Word16 * y,                           /* (i) Q0  : filtered adaptive codebook vector */
+        Word16 gain,                          /* (i) Q14 : adaptive codebook gain            */
+        Word16 L                              /* (i)     : subframe size                     */
+        )
 {
-	Word32 i;
-	Word32 L_tmp;
+    Word32 i;
+    Word32 L_tmp, L_tmp2;
 
-	for (i = 0; i < L; i++)
-	{
-		L_tmp = x[i] << 15;
-		L_tmp -= (y[i] * gain)<<1;
-		x2[i] = extract_h(L_shl2(L_tmp, 1));
-	}
+    for (i = 0; i < L; i++)
+    {
+        L_tmp = x[i] << 15;
+        L_tmp2 = L_mult(y[i], gain);
+        L_tmp = L_sub(L_tmp, L_tmp2);
+        x2[i] = extract_h(L_shl2(L_tmp, 1));
+    }
 
-	return;
+    return;
 }
 
 
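The Updt_tar change above is more than reindentation: the unsaturated (y[i] * gain) << 1 becomes L_mult/L_sub, so the Q14 gain product and the subtraction saturate instead of wrapping on 32-bit overflow (the excitation-energy sum in voAMRWBEnc.c below gets the same treatment). A simplified, hypothetical model of what those two basic ops compute, assuming the usual MAX_32 = 0x7fffffff and MIN_32 = 0x80000000 limits; the real basic_op.h versions may differ in detail:

/* Illustrative models only; real fixed-point libraries avoid the
 * signed-overflow behavior that plain C leaves undefined. */
#define MAX_32 ((Word32)0x7fffffffL)
#define MIN_32 ((Word32)0x80000000L)

static Word32 L_mult_model(Word16 a, Word16 b)   /* (a * b) << 1, saturated */
{
    Word32 p = (Word32)a * (Word32)b;
    if (p != (Word32)0x40000000L)     /* only -32768 * -32768 can overflow */
        return p << 1;
    return MAX_32;                    /* saturate instead of wrapping */
}

static Word32 L_sub_model(Word32 a, Word32 b)    /* a - b, saturated */
{
    Word32 d = a - b;
    /* overflow iff a and b differ in sign and d's sign flips from a's */
    if (((a ^ b) & (a ^ d)) < 0)
        d = (a < 0) ? MIN_32 : MAX_32;
    return d;
}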
diff --git a/media/libstagefright/codecs/amrwbenc/src/util.c b/media/libstagefright/codecs/amrwbenc/src/util.c
index 333140d..374245f 100644
--- a/media/libstagefright/codecs/amrwbenc/src/util.c
+++ b/media/libstagefright/codecs/amrwbenc/src/util.c
@@ -30,15 +30,15 @@
 ************************************************************************/
 
 void Set_zero(
-		Word16 x[],                           /* (o)    : vector to clear     */
-		Word16 L                              /* (i)    : length of vector    */
-	     )
+        Word16 x[],                           /* (o)    : vector to clear     */
+        Word16 L                              /* (i)    : length of vector    */
+        )
 {
-	Word32 num = (Word32)L;
-	while (num > 0) {
-		*x++ = 0;
+    Word32 num = (Word32)L;
+    while (num > 0) {
+        *x++ = 0;
                 --num;
-	}
+    }
 }
 
 
@@ -49,28 +49,28 @@
 *********************************************************************/
 
 void Copy(
-		Word16 x[],                           /* (i)   : input vector   */
-		Word16 y[],                           /* (o)   : output vector  */
-		Word16 L                              /* (i)   : vector length  */
-	 )
+        Word16 x[],                           /* (i)   : input vector   */
+        Word16 y[],                           /* (o)   : output vector  */
+        Word16 L                              /* (i)   : vector length  */
+        )
 {
-	Word32	temp1,temp2,num;
+    Word32  temp1,temp2,num;
         if (L <= 0) {
                 return;
         }
-	if(L&1)
-	{
-		temp1 = *x++;
-		*y++ = temp1;
-	}
-	num = (Word32)(L>>1);
-	while (num > 0) {
-		temp1 = *x++;
-		temp2 = *x++;
-		*y++ = temp1;
-		*y++ = temp2;
+    if(L&1)
+    {
+        temp1 = *x++;
+        *y++ = temp1;
+    }
+    num = (Word32)(L>>1);
+    while (num > 0) {
+        temp1 = *x++;
+        temp2 = *x++;
+        *y++ = temp1;
+        *y++ = temp2;
                 --num;
-	}
+    }
 }
 
 
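Copy above stays a hand-unrolled forward copy (one odd leading element, then two words per pass), now with an L <= 0 guard. For non-overlapping buffers its observable behavior is just a Word16 memcpy; a minimal reference under that assumption:

#include <string.h>

/* Reference behavior of Copy() for non-overlapping x/y; the shipping
 * version keeps the manual unroll and takes no libc dependency. */
static void Copy_ref(const Word16 x[], Word16 y[], Word16 L)
{
    if (L <= 0)               /* mirrors the guard added above */
        return;
    memcpy(y, x, (size_t)L * sizeof(Word16));
}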
diff --git a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
index df7b9b3..b908ff8 100644
--- a/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
+++ b/media/libstagefright/codecs/amrwbenc/src/voAMRWBEnc.c
@@ -19,8 +19,8 @@
 *                                                                      *
 *      Description: Performs the main encoder routine                  *
 *                   Fixed-point C simulation of AMR WB ACELP coding    *
-*		    algorithm with 20 msspeech frames for              *
-*		    wideband speech signals.                           *
+*                   algorithm with 20 ms speech frames for             *
+*                   wideband speech signals.                           *
 *                                                                      *
 ************************************************************************/
 
@@ -51,95 +51,95 @@
 /* isp tables for initialization */
 static Word16 isp_init[M] =
 {
-	32138, 30274, 27246, 23170, 18205, 12540, 6393, 0,
-	-6393, -12540, -18205, -23170, -27246, -30274, -32138, 1475
+    32138, 30274, 27246, 23170, 18205, 12540, 6393, 0,
+    -6393, -12540, -18205, -23170, -27246, -30274, -32138, 1475
 };
 
 static Word16 isf_init[M] =
 {
-	1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192,
-	9216, 10240, 11264, 12288, 13312, 14336, 15360, 3840
+    1024, 2048, 3072, 4096, 5120, 6144, 7168, 8192,
+    9216, 10240, 11264, 12288, 13312, 14336, 15360, 3840
 };
 
 /* High Band encoding */
 static const Word16 HP_gain[16] =
 {
-	3624, 4673, 5597, 6479, 7425, 8378, 9324, 10264,
-	11210, 12206, 13391, 14844, 16770, 19655, 24289, 32728
+    3624, 4673, 5597, 6479, 7425, 8378, 9324, 10264,
+    11210, 12206, 13391, 14844, 16770, 19655, 24289, 32728
 };
 
 /* Private function declaration */
 static Word16 synthesis(
-			Word16 Aq[],                          /* A(z)  : quantized Az               */
-			Word16 exc[],                         /* (i)   : excitation at 12kHz        */
-			Word16 Q_new,                         /* (i)   : scaling performed on exc   */
-			Word16 synth16k[],                    /* (o)   : 16kHz synthesis signal     */
-			Coder_State * st                      /* (i/o) : State structure            */
-			);
+            Word16 Aq[],                          /* A(z)  : quantized Az               */
+            Word16 exc[],                         /* (i)   : excitation at 12kHz        */
+            Word16 Q_new,                         /* (i)   : scaling performed on exc   */
+            Word16 synth16k[],                    /* (o)   : 16kHz synthesis signal     */
+            Coder_State * st                      /* (i/o) : State structure            */
+            );
 
 /* Codec some parameters initialization */
 void Reset_encoder(void *st, Word16 reset_all)
 {
-	Word16 i;
-	Coder_State *cod_state;
-	cod_state = (Coder_State *) st;
-	Set_zero(cod_state->old_exc, PIT_MAX + L_INTERPOL);
-	Set_zero(cod_state->mem_syn, M);
-	Set_zero(cod_state->past_isfq, M);
-	cod_state->mem_w0 = 0;
-	cod_state->tilt_code = 0;
-	cod_state->first_frame = 1;
-	Init_gp_clip(cod_state->gp_clip);
-	cod_state->L_gc_thres = 0;
-	if (reset_all != 0)
-	{
-		/* Static vectors to zero */
-		Set_zero(cod_state->old_speech, L_TOTAL - L_FRAME);
-		Set_zero(cod_state->old_wsp, (PIT_MAX / OPL_DECIM));
-		Set_zero(cod_state->mem_decim2, 3);
-		/* routines initialization */
-		Init_Decim_12k8(cod_state->mem_decim);
-		Init_HP50_12k8(cod_state->mem_sig_in);
-		Init_Levinson(cod_state->mem_levinson);
-		Init_Q_gain2(cod_state->qua_gain);
-		Init_Hp_wsp(cod_state->hp_wsp_mem);
-		/* isp initialization */
-		Copy(isp_init, cod_state->ispold, M);
-		Copy(isp_init, cod_state->ispold_q, M);
-		/* variable initialization */
-		cod_state->mem_preemph = 0;
-		cod_state->mem_wsp = 0;
-		cod_state->Q_old = 15;
-		cod_state->Q_max[0] = 15;
-		cod_state->Q_max[1] = 15;
-		cod_state->old_wsp_max = 0;
-		cod_state->old_wsp_shift = 0;
-		/* pitch ol initialization */
-		cod_state->old_T0_med = 40;
-		cod_state->ol_gain = 0;
-		cod_state->ada_w = 0;
-		cod_state->ol_wght_flg = 0;
-		for (i = 0; i < 5; i++)
-		{
-			cod_state->old_ol_lag[i] = 40;
-		}
-		Set_zero(cod_state->old_hp_wsp, (L_FRAME / 2) / OPL_DECIM + (PIT_MAX / OPL_DECIM));
-		Set_zero(cod_state->mem_syn_hf, M);
-		Set_zero(cod_state->mem_syn_hi, M);
-		Set_zero(cod_state->mem_syn_lo, M);
-		Init_HP50_12k8(cod_state->mem_sig_out);
-		Init_Filt_6k_7k(cod_state->mem_hf);
-		Init_HP400_12k8(cod_state->mem_hp400);
-		Copy(isf_init, cod_state->isfold, M);
-		cod_state->mem_deemph = 0;
-		cod_state->seed2 = 21845;
-		Init_Filt_6k_7k(cod_state->mem_hf2);
-		cod_state->gain_alpha = 32767;
-		cod_state->vad_hist = 0;
-		wb_vad_reset(cod_state->vadSt);
-		dtx_enc_reset(cod_state->dtx_encSt, isf_init);
-	}
-	return;
+    Word16 i;
+    Coder_State *cod_state;
+    cod_state = (Coder_State *) st;
+    Set_zero(cod_state->old_exc, PIT_MAX + L_INTERPOL);
+    Set_zero(cod_state->mem_syn, M);
+    Set_zero(cod_state->past_isfq, M);
+    cod_state->mem_w0 = 0;
+    cod_state->tilt_code = 0;
+    cod_state->first_frame = 1;
+    Init_gp_clip(cod_state->gp_clip);
+    cod_state->L_gc_thres = 0;
+    if (reset_all != 0)
+    {
+        /* Static vectors to zero */
+        Set_zero(cod_state->old_speech, L_TOTAL - L_FRAME);
+        Set_zero(cod_state->old_wsp, (PIT_MAX / OPL_DECIM));
+        Set_zero(cod_state->mem_decim2, 3);
+        /* routines initialization */
+        Init_Decim_12k8(cod_state->mem_decim);
+        Init_HP50_12k8(cod_state->mem_sig_in);
+        Init_Levinson(cod_state->mem_levinson);
+        Init_Q_gain2(cod_state->qua_gain);
+        Init_Hp_wsp(cod_state->hp_wsp_mem);
+        /* isp initialization */
+        Copy(isp_init, cod_state->ispold, M);
+        Copy(isp_init, cod_state->ispold_q, M);
+        /* variable initialization */
+        cod_state->mem_preemph = 0;
+        cod_state->mem_wsp = 0;
+        cod_state->Q_old = 15;
+        cod_state->Q_max[0] = 15;
+        cod_state->Q_max[1] = 15;
+        cod_state->old_wsp_max = 0;
+        cod_state->old_wsp_shift = 0;
+        /* pitch ol initialization */
+        cod_state->old_T0_med = 40;
+        cod_state->ol_gain = 0;
+        cod_state->ada_w = 0;
+        cod_state->ol_wght_flg = 0;
+        for (i = 0; i < 5; i++)
+        {
+            cod_state->old_ol_lag[i] = 40;
+        }
+        Set_zero(cod_state->old_hp_wsp, (L_FRAME / 2) / OPL_DECIM + (PIT_MAX / OPL_DECIM));
+        Set_zero(cod_state->mem_syn_hf, M);
+        Set_zero(cod_state->mem_syn_hi, M);
+        Set_zero(cod_state->mem_syn_lo, M);
+        Init_HP50_12k8(cod_state->mem_sig_out);
+        Init_Filt_6k_7k(cod_state->mem_hf);
+        Init_HP400_12k8(cod_state->mem_hp400);
+        Copy(isf_init, cod_state->isfold, M);
+        cod_state->mem_deemph = 0;
+        cod_state->seed2 = 21845;
+        Init_Filt_6k_7k(cod_state->mem_hf2);
+        cod_state->gain_alpha = 32767;
+        cod_state->vad_hist = 0;
+        wb_vad_reset(cod_state->vadSt);
+        dtx_enc_reset(cod_state->dtx_encSt, isf_init);
+    }
+    return;
 }
 
 /*-----------------------------------------------------------------*
@@ -149,1176 +149,1180 @@
 *                                                                 *
 *-----------------------------------------------------------------*/
 void coder(
-		Word16 * mode,                        /* input :  used mode                             */
-		Word16 speech16k[],                   /* input :  320 new speech samples (at 16 kHz)    */
-		Word16 prms[],                        /* output:  output parameters                     */
-		Word16 * ser_size,                    /* output:  bit rate of the used mode             */
-		void *spe_state,                      /* i/o   :  State structure                       */
-		Word16 allow_dtx                      /* input :  DTX ON/OFF                            */
-	  )
+        Word16 * mode,                        /* input :  used mode                             */
+        Word16 speech16k[],                   /* input :  320 new speech samples (at 16 kHz)    */
+        Word16 prms[],                        /* output:  output parameters                     */
+        Word16 * ser_size,                    /* output:  bit rate of the used mode             */
+        void *spe_state,                      /* i/o   :  State structure                       */
+        Word16 allow_dtx                      /* input :  DTX ON/OFF                            */
+        )
 {
-	/* Coder states */
-	Coder_State *st;
-	/* Speech vector */
-	Word16 old_speech[L_TOTAL];
-	Word16 *new_speech, *speech, *p_window;
+    /* Coder states */
+    Coder_State *st;
+    /* Speech vector */
+    Word16 old_speech[L_TOTAL];
+    Word16 *new_speech, *speech, *p_window;
 
-	/* Weighted speech vector */
-	Word16 old_wsp[L_FRAME + (PIT_MAX / OPL_DECIM)];
-	Word16 *wsp;
+    /* Weighted speech vector */
+    Word16 old_wsp[L_FRAME + (PIT_MAX / OPL_DECIM)];
+    Word16 *wsp;
 
-	/* Excitation vector */
-	Word16 old_exc[(L_FRAME + 1) + PIT_MAX + L_INTERPOL];
-	Word16 *exc;
+    /* Excitation vector */
+    Word16 old_exc[(L_FRAME + 1) + PIT_MAX + L_INTERPOL];
+    Word16 *exc;
 
-	/* LPC coefficients */
-	Word16 r_h[M + 1], r_l[M + 1];         /* Autocorrelations of windowed speech  */
-	Word16 rc[M];                          /* Reflection coefficients.             */
-	Word16 Ap[M + 1];                      /* A(z) with spectral expansion         */
-	Word16 ispnew[M];                      /* immittance spectral pairs at 4nd sfr */
-	Word16 ispnew_q[M];                    /* quantized ISPs at 4nd subframe       */
-	Word16 isf[M];                         /* ISF (frequency domain) at 4nd sfr    */
-	Word16 *p_A, *p_Aq;                    /* ptr to A(z) for the 4 subframes      */
-	Word16 A[NB_SUBFR * (M + 1)];          /* A(z) unquantized for the 4 subframes */
-	Word16 Aq[NB_SUBFR * (M + 1)];         /* A(z)   quantized for the 4 subframes */
+    /* LPC coefficients */
+    Word16 r_h[M + 1], r_l[M + 1];         /* Autocorrelations of windowed speech  */
+    Word16 rc[M];                          /* Reflection coefficients.             */
+    Word16 Ap[M + 1];                      /* A(z) with spectral expansion         */
+    Word16 ispnew[M];                      /* immittance spectral pairs at 4th sfr */
+    Word16 ispnew_q[M];                    /* quantized ISPs at 4th subframe       */
+    Word16 isf[M];                         /* ISF (frequency domain) at 4th sfr    */
+    Word16 *p_A, *p_Aq;                    /* ptr to A(z) for the 4 subframes      */
+    Word16 A[NB_SUBFR * (M + 1)];          /* A(z) unquantized for the 4 subframes */
+    Word16 Aq[NB_SUBFR * (M + 1)];         /* A(z)   quantized for the 4 subframes */
 
-	/* Other vectors */
-	Word16 xn[L_SUBFR];                    /* Target vector for pitch search     */
-	Word16 xn2[L_SUBFR];                   /* Target vector for codebook search  */
-	Word16 dn[L_SUBFR];                    /* Correlation between xn2 and h1     */
-	Word16 cn[L_SUBFR];                    /* Target vector in residual domain   */
-	Word16 h1[L_SUBFR];                    /* Impulse response vector            */
-	Word16 h2[L_SUBFR];                    /* Impulse response vector            */
-	Word16 code[L_SUBFR];                  /* Fixed codebook excitation          */
-	Word16 y1[L_SUBFR];                    /* Filtered adaptive excitation       */
-	Word16 y2[L_SUBFR];                    /* Filtered adaptive excitation       */
-	Word16 error[M + L_SUBFR];             /* error of quantization              */
-	Word16 synth[L_SUBFR];                 /* 12.8kHz synthesis vector           */
-	Word16 exc2[L_FRAME];                  /* excitation vector                  */
-	Word16 buf[L_FRAME];                   /* VAD buffer                         */
+    /* Other vectors */
+    Word16 xn[L_SUBFR];                    /* Target vector for pitch search     */
+    Word16 xn2[L_SUBFR];                   /* Target vector for codebook search  */
+    Word16 dn[L_SUBFR];                    /* Correlation between xn2 and h1     */
+    Word16 cn[L_SUBFR];                    /* Target vector in residual domain   */
+    Word16 h1[L_SUBFR];                    /* Impulse response vector            */
+    Word16 h2[L_SUBFR];                    /* Impulse response vector            */
+    Word16 code[L_SUBFR];                  /* Fixed codebook excitation          */
+    Word16 y1[L_SUBFR];                    /* Filtered adaptive excitation       */
+    Word16 y2[L_SUBFR];                    /* Filtered adaptive excitation       */
+    Word16 error[M + L_SUBFR];             /* error of quantization              */
+    Word16 synth[L_SUBFR];                 /* 12.8kHz synthesis vector           */
+    Word16 exc2[L_FRAME];                  /* excitation vector                  */
+    Word16 buf[L_FRAME];                   /* VAD buffer                         */
 
-	/* Scalars */
-	Word32 i, j, i_subfr, select, pit_flag, clip_gain, vad_flag;
-	Word16 codec_mode;
-	Word16 T_op, T_op2, T0, T0_min, T0_max, T0_frac, index;
-	Word16 gain_pit, gain_code, g_coeff[4], g_coeff2[4];
-	Word16 tmp, gain1, gain2, exp, Q_new, mu, shift, max;
-	Word16 voice_fac;
-	Word16 indice[8];
-	Word32 L_tmp, L_gain_code, L_max, L_tmp1;
-	Word16 code2[L_SUBFR];                         /* Fixed codebook excitation  */
-	Word16 stab_fac, fac, gain_code_lo;
+    /* Scalars */
+    Word32 i, j, i_subfr, select, pit_flag, clip_gain, vad_flag;
+    Word16 codec_mode;
+    Word16 T_op, T_op2, T0, T0_min, T0_max, T0_frac, index;
+    Word16 gain_pit, gain_code, g_coeff[4], g_coeff2[4];
+    Word16 tmp, gain1, gain2, exp, Q_new, mu, shift, max;
+    Word16 voice_fac;
+    Word16 indice[8];
+    Word32 L_tmp, L_gain_code, L_max, L_tmp1;
+    Word16 code2[L_SUBFR];                         /* Fixed codebook excitation  */
+    Word16 stab_fac, fac, gain_code_lo;
 
-	Word16 corr_gain;
-	Word16 *vo_p0, *vo_p1, *vo_p2, *vo_p3;
+    Word16 corr_gain;
+    Word16 *vo_p0, *vo_p1, *vo_p2, *vo_p3;
 
-	st = (Coder_State *) spe_state;
+    st = (Coder_State *) spe_state;
 
-	*ser_size = nb_of_bits[*mode];
-	codec_mode = *mode;
+    *ser_size = nb_of_bits[*mode];
+    codec_mode = *mode;
 
-	/*--------------------------------------------------------------------------*
-	 *          Initialize pointers to speech vector.                           *
-	 *                                                                          *
-	 *                                                                          *
-	 *                    |-------|-------|-------|-------|-------|-------|     *
-	 *                     past sp   sf1     sf2     sf3     sf4    L_NEXT      *
-	 *                    <-------  Total speech buffer (L_TOTAL)   ------>     *
-	 *              old_speech                                                  *
-	 *                    <-------  LPC analysis window (L_WINDOW)  ------>     *
-	 *                    |       <-- present frame (L_FRAME) ---->             *
-	 *                   p_window |       <----- new speech (L_FRAME) ---->     *
-	 *                            |       |                                     *
-	 *                          speech    |                                     *
-	 *                                 new_speech                               *
-	 *--------------------------------------------------------------------------*/
+    /*--------------------------------------------------------------------------*
+     *          Initialize pointers to speech vector.                           *
+     *                                                                          *
+     *                                                                          *
+     *                    |-------|-------|-------|-------|-------|-------|     *
+     *                     past sp   sf1     sf2     sf3     sf4    L_NEXT      *
+     *                    <-------  Total speech buffer (L_TOTAL)   ------>     *
+     *              old_speech                                                  *
+     *                    <-------  LPC analysis window (L_WINDOW)  ------>     *
+     *                    |       <-- present frame (L_FRAME) ---->             *
+     *                   p_window |       <----- new speech (L_FRAME) ---->     *
+     *                            |       |                                     *
+     *                          speech    |                                     *
+     *                                 new_speech                               *
+     *--------------------------------------------------------------------------*/
 
-	new_speech = old_speech + L_TOTAL - L_FRAME - L_FILT;         /* New speech     */
-	speech = old_speech + L_TOTAL - L_FRAME - L_NEXT;             /* Present frame  */
-	p_window = old_speech + L_TOTAL - L_WINDOW;
+    new_speech = old_speech + L_TOTAL - L_FRAME - L_FILT;         /* New speech     */
+    speech = old_speech + L_TOTAL - L_FRAME - L_NEXT;             /* Present frame  */
+    p_window = old_speech + L_TOTAL - L_WINDOW;
 
-	exc = old_exc + PIT_MAX + L_INTERPOL;
-	wsp = old_wsp + (PIT_MAX / OPL_DECIM);
+    exc = old_exc + PIT_MAX + L_INTERPOL;
+    wsp = old_wsp + (PIT_MAX / OPL_DECIM);
 
-	/* copy coder memory state into working space */
-	Copy(st->old_speech, old_speech, L_TOTAL - L_FRAME);
-	Copy(st->old_wsp, old_wsp, PIT_MAX / OPL_DECIM);
-	Copy(st->old_exc, old_exc, PIT_MAX + L_INTERPOL);
+    /* copy coder memory state into working space */
+    Copy(st->old_speech, old_speech, L_TOTAL - L_FRAME);
+    Copy(st->old_wsp, old_wsp, PIT_MAX / OPL_DECIM);
+    Copy(st->old_exc, old_exc, PIT_MAX + L_INTERPOL);
 
-	/*---------------------------------------------------------------*
-	 * Down sampling signal from 16kHz to 12.8kHz                    *
-	 * -> The signal is extended by L_FILT samples (padded to zero)  *
-	 * to avoid additional delay (L_FILT samples) in the coder.      *
-	 * The last L_FILT samples are approximated after decimation and *
-	 * are used (and windowed) only in autocorrelations.             *
-	 *---------------------------------------------------------------*/
+    /*---------------------------------------------------------------*
+     * Down sampling signal from 16kHz to 12.8kHz                    *
+     * -> The signal is extended by L_FILT samples (padded to zero)  *
+     * to avoid additional delay (L_FILT samples) in the coder.      *
+     * The last L_FILT samples are approximated after decimation and *
+     * are used (and windowed) only in autocorrelations.             *
+     *---------------------------------------------------------------*/
 
-	Decim_12k8(speech16k, L_FRAME16k, new_speech, st->mem_decim);
+    Decim_12k8(speech16k, L_FRAME16k, new_speech, st->mem_decim);
 
-	/* last L_FILT samples for autocorrelation window */
-	Copy(st->mem_decim, code, 2 * L_FILT16k);
-	Set_zero(error, L_FILT16k);            /* set next sample to zero */
-	Decim_12k8(error, L_FILT16k, new_speech + L_FRAME, code);
+    /* last L_FILT samples for autocorrelation window */
+    Copy(st->mem_decim, code, 2 * L_FILT16k);
+    Set_zero(error, L_FILT16k);            /* set next sample to zero */
+    Decim_12k8(error, L_FILT16k, new_speech + L_FRAME, code);
 
-	/*---------------------------------------------------------------*
-	 * Perform 50Hz HP filtering of input signal.                    *
-	 *---------------------------------------------------------------*/
+    /*---------------------------------------------------------------*
+     * Perform 50Hz HP filtering of input signal.                    *
+     *---------------------------------------------------------------*/
 
-	HP50_12k8(new_speech, L_FRAME, st->mem_sig_in);
+    HP50_12k8(new_speech, L_FRAME, st->mem_sig_in);
 
-	/* last L_FILT samples for autocorrelation window */
-	Copy(st->mem_sig_in, code, 6);
-	HP50_12k8(new_speech + L_FRAME, L_FILT, code);
+    /* last L_FILT samples for autocorrelation window */
+    Copy(st->mem_sig_in, code, 6);
+    HP50_12k8(new_speech + L_FRAME, L_FILT, code);
 
-	/*---------------------------------------------------------------*
-	 * Perform fixed preemphasis through 1 - g z^-1                  *
-	 * Scale signal to get maximum of precision in filtering         *
-	 *---------------------------------------------------------------*/
+    /*---------------------------------------------------------------*
+     * Perform fixed preemphasis through 1 - g z^-1                  *
+     * Scale signal to get maximum of precision in filtering         *
+     *---------------------------------------------------------------*/
 
-	mu = PREEMPH_FAC >> 1;              /* Q15 --> Q14 */
+    mu = PREEMPH_FAC >> 1;              /* Q15 --> Q14 */
 
-	/* get max of new preemphased samples (L_FRAME+L_FILT) */
-	L_tmp = new_speech[0] << 15;
-	L_tmp -= (st->mem_preemph * mu)<<1;
-	L_max = L_abs(L_tmp);
+    /* get max of new preemphased samples (L_FRAME+L_FILT) */
+    L_tmp = new_speech[0] << 15;
+    L_tmp -= (st->mem_preemph * mu)<<1;
+    L_max = L_abs(L_tmp);
 
-	for (i = 1; i < L_FRAME + L_FILT; i++)
-	{
-		L_tmp = new_speech[i] << 15;
-		L_tmp -= (new_speech[i - 1] * mu)<<1;
-		L_tmp = L_abs(L_tmp);
-		if(L_tmp > L_max)
-		{
-			L_max = L_tmp;
-		}
-	}
+    for (i = 1; i < L_FRAME + L_FILT; i++)
+    {
+        L_tmp = new_speech[i] << 15;
+        L_tmp -= (new_speech[i - 1] * mu)<<1;
+        L_tmp = L_abs(L_tmp);
+        if(L_tmp > L_max)
+        {
+            L_max = L_tmp;
+        }
+    }
 
-	/* get scaling factor for new and previous samples */
-	/* limit scaling to Q_MAX to keep dynamic for ringing in low signal */
-	/* limit scaling to Q_MAX also to avoid a[0]<1 in syn_filt_32 */
-	tmp = extract_h(L_max);
-	if (tmp == 0)
-	{
-		shift = Q_MAX;
-	} else
-	{
-		shift = norm_s(tmp) - 1;
-		if (shift < 0)
-		{
-			shift = 0;
-		}
-		if (shift > Q_MAX)
-		{
-			shift = Q_MAX;
-		}
-	}
-	Q_new = shift;
-	if (Q_new > st->Q_max[0])
-	{
-		Q_new = st->Q_max[0];
-	}
-	if (Q_new > st->Q_max[1])
-	{
-		Q_new = st->Q_max[1];
-	}
-	exp = (Q_new - st->Q_old);
-	st->Q_old = Q_new;
-	st->Q_max[1] = st->Q_max[0];
-	st->Q_max[0] = shift;
+    /* get scaling factor for new and previous samples */
+    /* limit scaling to Q_MAX to keep dynamic for ringing in low signal */
+    /* limit scaling to Q_MAX also to avoid a[0]<1 in syn_filt_32 */
+    tmp = extract_h(L_max);
+    if (tmp == 0)
+    {
+        shift = Q_MAX;
+    } else
+    {
+        shift = norm_s(tmp) - 1;
+        if (shift < 0)
+        {
+            shift = 0;
+        }
+        if (shift > Q_MAX)
+        {
+            shift = Q_MAX;
+        }
+    }
+    Q_new = shift;
+    if (Q_new > st->Q_max[0])
+    {
+        Q_new = st->Q_max[0];
+    }
+    if (Q_new > st->Q_max[1])
+    {
+        Q_new = st->Q_max[1];
+    }
+    exp = (Q_new - st->Q_old);
+    st->Q_old = Q_new;
+    st->Q_max[1] = st->Q_max[0];
+    st->Q_max[0] = shift;
 
-	/* preemphasis with scaling (L_FRAME+L_FILT) */
-	tmp = new_speech[L_FRAME - 1];
+    /* preemphasis with scaling (L_FRAME+L_FILT) */
+    tmp = new_speech[L_FRAME - 1];
 
-	for (i = L_FRAME + L_FILT - 1; i > 0; i--)
-	{
-		L_tmp = new_speech[i] << 15;
-		L_tmp -= (new_speech[i - 1] * mu)<<1;
-		L_tmp = (L_tmp << Q_new);
-		new_speech[i] = vo_round(L_tmp);
-	}
+    for (i = L_FRAME + L_FILT - 1; i > 0; i--)
+    {
+        L_tmp = new_speech[i] << 15;
+        L_tmp -= (new_speech[i - 1] * mu)<<1;
+        L_tmp = (L_tmp << Q_new);
+        new_speech[i] = vo_round(L_tmp);
+    }
 
-	L_tmp = new_speech[0] << 15;
-	L_tmp -= (st->mem_preemph * mu)<<1;
-	L_tmp = (L_tmp << Q_new);
-	new_speech[0] = vo_round(L_tmp);
+    L_tmp = new_speech[0] << 15;
+    L_tmp -= (st->mem_preemph * mu)<<1;
+    L_tmp = (L_tmp << Q_new);
+    new_speech[0] = vo_round(L_tmp);
 
-	st->mem_preemph = tmp;
+    st->mem_preemph = tmp;
 
-	/* scale previous samples and memory */
+    /* scale previous samples and memory */
 
-	Scale_sig(old_speech, L_TOTAL - L_FRAME - L_FILT, exp);
-	Scale_sig(old_exc, PIT_MAX + L_INTERPOL, exp);
-	Scale_sig(st->mem_syn, M, exp);
-	Scale_sig(st->mem_decim2, 3, exp);
-	Scale_sig(&(st->mem_wsp), 1, exp);
-	Scale_sig(&(st->mem_w0), 1, exp);
+    Scale_sig(old_speech, L_TOTAL - L_FRAME - L_FILT, exp);
+    Scale_sig(old_exc, PIT_MAX + L_INTERPOL, exp);
+    Scale_sig(st->mem_syn, M, exp);
+    Scale_sig(st->mem_decim2, 3, exp);
+    Scale_sig(&(st->mem_wsp), 1, exp);
+    Scale_sig(&(st->mem_w0), 1, exp);
 
-	/*------------------------------------------------------------------------*
-	 *  Call VAD                                                              *
-	 *  Preemphesis scale down signal in low frequency and keep dynamic in HF.*
-	 *  Vad work slightly in futur (new_speech = speech + L_NEXT - L_FILT).   *
-	 *------------------------------------------------------------------------*/
-	Copy(new_speech, buf, L_FRAME);
+    /*------------------------------------------------------------------------*
+     *  Call VAD                                                              *
+     *  Preemphasis scales the signal down at low frequency, keeps HF dynamic.*
+     *  VAD works slightly ahead (new_speech = speech + L_NEXT - L_FILT).     *
+     *------------------------------------------------------------------------*/
+    Copy(new_speech, buf, L_FRAME);
 
 #ifdef ASM_OPT        /* asm optimization branch */
-	Scale_sig_opt(buf, L_FRAME, 1 - Q_new);
+    Scale_sig_opt(buf, L_FRAME, 1 - Q_new);
 #else
-	Scale_sig(buf, L_FRAME, 1 - Q_new);
+    Scale_sig(buf, L_FRAME, 1 - Q_new);
 #endif
 
-	vad_flag = wb_vad(st->vadSt, buf);          /* Voice Activity Detection */
-	if (vad_flag == 0)
-	{
-		st->vad_hist = (st->vad_hist + 1);
-	} else
-	{
-		st->vad_hist = 0;
-	}
+    vad_flag = wb_vad(st->vadSt, buf);          /* Voice Activity Detection */
+    if (vad_flag == 0)
+    {
+        st->vad_hist = (st->vad_hist + 1);
+    } else
+    {
+        st->vad_hist = 0;
+    }
 
-	/* DTX processing */
-	if (allow_dtx != 0)
-	{
-		/* Note that mode may change here */
-		tx_dtx_handler(st->dtx_encSt, vad_flag, mode);
-		*ser_size = nb_of_bits[*mode];
-	}
+    /* DTX processing */
+    if (allow_dtx != 0)
+    {
+        /* Note that mode may change here */
+        tx_dtx_handler(st->dtx_encSt, vad_flag, mode);
+        *ser_size = nb_of_bits[*mode];
+    }
 
-	if(*mode != MRDTX)
-	{
-		Parm_serial(vad_flag, 1, &prms);
-	}
-	/*------------------------------------------------------------------------*
-	 *  Perform LPC analysis                                                  *
-	 *  ~~~~~~~~~~~~~~~~~~~~                                                  *
-	 *   - autocorrelation + lag windowing                                    *
-	 *   - Levinson-durbin algorithm to find a[]                              *
-	 *   - convert a[] to isp[]                                               *
-	 *   - convert isp[] to isf[] for quantization                            *
-	 *   - quantize and code the isf[]                                        *
-	 *   - convert isf[] to isp[] for interpolation                           *
-	 *   - find the interpolated ISPs and convert to a[] for the 4 subframes  *
-	 *------------------------------------------------------------------------*/
+    if(*mode != MRDTX)
+    {
+        Parm_serial(vad_flag, 1, &prms);
+    }
+    /*------------------------------------------------------------------------*
+     *  Perform LPC analysis                                                  *
+     *  ~~~~~~~~~~~~~~~~~~~~                                                  *
+     *   - autocorrelation + lag windowing                                    *
+     *   - Levinson-Durbin algorithm to find a[]                              *
+     *   - convert a[] to isp[]                                               *
+     *   - convert isp[] to isf[] for quantization                            *
+     *   - quantize and code the isf[]                                        *
+     *   - convert isf[] to isp[] for interpolation                           *
+     *   - find the interpolated ISPs and convert to a[] for the 4 subframes  *
+     *------------------------------------------------------------------------*/
 
-	/* LP analysis centered at 4nd subframe */
-	Autocorr(p_window, M, r_h, r_l);                        /* Autocorrelations */
-	Lag_window(r_h, r_l);                                   /* Lag windowing    */
-	Levinson(r_h, r_l, A, rc, st->mem_levinson);            /* Levinson Durbin  */
-	Az_isp(A, ispnew, st->ispold);                          /* From A(z) to ISP */
+    /* LP analysis centered at 4th subframe */
+    Autocorr(p_window, M, r_h, r_l);                        /* Autocorrelations */
+    Lag_window(r_h, r_l);                                   /* Lag windowing    */
+    Levinson(r_h, r_l, A, rc, st->mem_levinson);            /* Levinson Durbin  */
+    Az_isp(A, ispnew, st->ispold);                          /* From A(z) to ISP */
 
-	/* Find the interpolated ISPs and convert to a[] for all subframes */
-	Int_isp(st->ispold, ispnew, interpol_frac, A);
+    /* Find the interpolated ISPs and convert to a[] for all subframes */
+    Int_isp(st->ispold, ispnew, interpol_frac, A);
 
-	/* update ispold[] for the next frame */
-	Copy(ispnew, st->ispold, M);
+    /* update ispold[] for the next frame */
+    Copy(ispnew, st->ispold, M);
 
-	/* Convert ISPs to frequency domain 0..6400 */
-	Isp_isf(ispnew, isf, M);
+    /* Convert ISPs to frequency domain 0..6400 */
+    Isp_isf(ispnew, isf, M);
 
-	/* check resonance for pitch clipping algorithm */
-	Gp_clip_test_isf(isf, st->gp_clip);
+    /* check resonance for pitch clipping algorithm */
+    Gp_clip_test_isf(isf, st->gp_clip);
 
-	/*----------------------------------------------------------------------*
-	 *  Perform PITCH_OL analysis                                           *
-	 *  ~~~~~~~~~~~~~~~~~~~~~~~~~                                           *
-	 * - Find the residual res[] for the whole speech frame                 *
-	 * - Find the weighted input speech wsp[] for the whole speech frame    *
-	 * - scale wsp[] to avoid overflow in pitch estimation                  *
-	 * - Find open loop pitch lag for whole speech frame                    *
-	 *----------------------------------------------------------------------*/
-	p_A = A;
-	for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
-	{
-		/* Weighting of LPC coefficients */
-		Weight_a(p_A, Ap, GAMMA1, M);
+    /*----------------------------------------------------------------------*
+     *  Perform PITCH_OL analysis                                           *
+     *  ~~~~~~~~~~~~~~~~~~~~~~~~~                                           *
+     * - Find the residual res[] for the whole speech frame                 *
+     * - Find the weighted input speech wsp[] for the whole speech frame    *
+     * - scale wsp[] to avoid overflow in pitch estimation                  *
+     * - Find open loop pitch lag for whole speech frame                    *
+     *----------------------------------------------------------------------*/
+    p_A = A;
+    for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
+    {
+        /* Weighting of LPC coefficients */
+        Weight_a(p_A, Ap, GAMMA1, M);
 
 #ifdef ASM_OPT                    /* asm optimization branch */
-		Residu_opt(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
+        Residu_opt(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
 #else
-		Residu(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
+        Residu(Ap, &speech[i_subfr], &wsp[i_subfr], L_SUBFR);
 #endif
 
-		p_A += (M + 1);
-	}
+        p_A += (M + 1);
+    }
 
-	Deemph2(wsp, TILT_FAC, L_FRAME, &(st->mem_wsp));
+    Deemph2(wsp, TILT_FAC, L_FRAME, &(st->mem_wsp));
 
-	/* find maximum value on wsp[] for 12 bits scaling */
-	max = 0;
-	for (i = 0; i < L_FRAME; i++)
-	{
-		tmp = abs_s(wsp[i]);
-		if(tmp > max)
-		{
-			max = tmp;
-		}
-	}
-	tmp = st->old_wsp_max;
-	if(max > tmp)
-	{
-		tmp = max;                         /* tmp = max(wsp_max, old_wsp_max) */
-	}
-	st->old_wsp_max = max;
+    /* find maximum value on wsp[] for 12 bits scaling */
+    max = 0;
+    for (i = 0; i < L_FRAME; i++)
+    {
+        tmp = abs_s(wsp[i]);
+        if(tmp > max)
+        {
+            max = tmp;
+        }
+    }
+    tmp = st->old_wsp_max;
+    if(max > tmp)
+    {
+        tmp = max;                         /* tmp = max(wsp_max, old_wsp_max) */
+    }
+    st->old_wsp_max = max;
 
-	shift = norm_s(tmp) - 3;
-	if (shift > 0)
-	{
-		shift = 0;                         /* shift = 0..-3 */
-	}
-	/* decimation of wsp[] to search pitch in LF and to reduce complexity */
-	LP_Decim2(wsp, L_FRAME, st->mem_decim2);
+    shift = norm_s(tmp) - 3;
+    if (shift > 0)
+    {
+        shift = 0;                         /* shift = 0..-3 */
+    }
+    /* decimation of wsp[] to search pitch in LF and to reduce complexity */
+    LP_Decim2(wsp, L_FRAME, st->mem_decim2);
 
-	/* scale wsp[] in 12 bits to avoid overflow */
+    /* scale wsp[] in 12 bits to avoid overflow */
 #ifdef  ASM_OPT                  /* asm optimization branch */
-	Scale_sig_opt(wsp, L_FRAME / OPL_DECIM, shift);
+    Scale_sig_opt(wsp, L_FRAME / OPL_DECIM, shift);
 #else
-	Scale_sig(wsp, L_FRAME / OPL_DECIM, shift);
+    Scale_sig(wsp, L_FRAME / OPL_DECIM, shift);
 #endif
-	/* scale old_wsp (warning: exp must be Q_new-Q_old) */
-	exp = exp + (shift - st->old_wsp_shift);
-	st->old_wsp_shift = shift;
+    /* scale old_wsp (warning: exp must be Q_new-Q_old) */
+    exp = exp + (shift - st->old_wsp_shift);
+    st->old_wsp_shift = shift;
 
-	Scale_sig(old_wsp, PIT_MAX / OPL_DECIM, exp);
-	Scale_sig(st->old_hp_wsp, PIT_MAX / OPL_DECIM, exp);
+    Scale_sig(old_wsp, PIT_MAX / OPL_DECIM, exp);
+    Scale_sig(st->old_hp_wsp, PIT_MAX / OPL_DECIM, exp);
 
-	scale_mem_Hp_wsp(st->hp_wsp_mem, exp);
+    scale_mem_Hp_wsp(st->hp_wsp_mem, exp);
 
-	/* Find open loop pitch lag for whole speech frame */
+    /* Find open loop pitch lag for whole speech frame */
 
-	if(*ser_size == NBBITS_7k)
-	{
-		/* Find open loop pitch lag for whole speech frame */
-		T_op = Pitch_med_ol(wsp, st, L_FRAME / OPL_DECIM);
-	} else
-	{
-		/* Find open loop pitch lag for first 1/2 frame */
-		T_op = Pitch_med_ol(wsp, st, (L_FRAME/2) / OPL_DECIM);
-	}
+    if(*ser_size == NBBITS_7k)
+    {
+        /* Find open loop pitch lag for whole speech frame */
+        T_op = Pitch_med_ol(wsp, st, L_FRAME / OPL_DECIM);
+    } else
+    {
+        /* Find open loop pitch lag for first 1/2 frame */
+        T_op = Pitch_med_ol(wsp, st, (L_FRAME/2) / OPL_DECIM);
+    }
 
-	if(st->ol_gain > 19661)       /* 0.6 in Q15 */
-	{
-		st->old_T0_med = Med_olag(T_op, st->old_ol_lag);
-		st->ada_w = 32767;
-	} else
-	{
-		st->ada_w = vo_mult(st->ada_w, 29491);
-	}
+    if(st->ol_gain > 19661)       /* 0.6 in Q15 */
+    {
+        st->old_T0_med = Med_olag(T_op, st->old_ol_lag);
+        st->ada_w = 32767;
+    } else
+    {
+        st->ada_w = vo_mult(st->ada_w, 29491);
+    }
 
-	if(st->ada_w < 26214)
-		st->ol_wght_flg = 0;
-	else
-		st->ol_wght_flg = 1;
+    if(st->ada_w < 26214)
+        st->ol_wght_flg = 0;
+    else
+        st->ol_wght_flg = 1;
 
-	wb_vad_tone_detection(st->vadSt, st->ol_gain);
-	T_op *= OPL_DECIM;
+    wb_vad_tone_detection(st->vadSt, st->ol_gain);
+    T_op *= OPL_DECIM;
 
-	if(*ser_size != NBBITS_7k)
-	{
-		/* Find open loop pitch lag for second 1/2 frame */
-		T_op2 = Pitch_med_ol(wsp + ((L_FRAME / 2) / OPL_DECIM), st, (L_FRAME/2) / OPL_DECIM);
+    if(*ser_size != NBBITS_7k)
+    {
+        /* Find open loop pitch lag for second 1/2 frame */
+        T_op2 = Pitch_med_ol(wsp + ((L_FRAME / 2) / OPL_DECIM), st, (L_FRAME/2) / OPL_DECIM);
 
-		if(st->ol_gain > 19661)   /* 0.6 in Q15 */
-		{
-			st->old_T0_med = Med_olag(T_op2, st->old_ol_lag);
-			st->ada_w = 32767;
-		} else
-		{
-			st->ada_w = mult(st->ada_w, 29491);
-		}
+        if(st->ol_gain > 19661)   /* 0.6 in Q15 */
+        {
+            st->old_T0_med = Med_olag(T_op2, st->old_ol_lag);
+            st->ada_w = 32767;
+        } else
+        {
+            st->ada_w = mult(st->ada_w, 29491);
+        }
 
-		if(st->ada_w < 26214)
-			st->ol_wght_flg = 0;
-		else
-			st->ol_wght_flg = 1;
+        if(st->ada_w < 26214)
+            st->ol_wght_flg = 0;
+        else
+            st->ol_wght_flg = 1;
 
-		wb_vad_tone_detection(st->vadSt, st->ol_gain);
+        wb_vad_tone_detection(st->vadSt, st->ol_gain);
 
-		T_op2 *= OPL_DECIM;
+        T_op2 *= OPL_DECIM;
 
-	} else
-	{
-		T_op2 = T_op;
-	}
-	/*----------------------------------------------------------------------*
-	 *                              DTX-CNG                                 *
-	 *----------------------------------------------------------------------*/
-	if(*mode == MRDTX)            /* CNG mode */
-	{
-		/* Buffer isf's and energy */
+    } else
+    {
+        T_op2 = T_op;
+    }
+    /*----------------------------------------------------------------------*
+     *                              DTX-CNG                                 *
+     *----------------------------------------------------------------------*/
+    if(*mode == MRDTX)            /* CNG mode */
+    {
+        /* Buffer isf's and energy */
 #ifdef ASM_OPT                   /* asm optimization branch */
-		Residu_opt(&A[3 * (M + 1)], speech, exc, L_FRAME);
+        Residu_opt(&A[3 * (M + 1)], speech, exc, L_FRAME);
 #else
-		Residu(&A[3 * (M + 1)], speech, exc, L_FRAME);
+        Residu(&A[3 * (M + 1)], speech, exc, L_FRAME);
 #endif
 
-		for (i = 0; i < L_FRAME; i++)
-		{
-			exc2[i] = shr(exc[i], Q_new);
-		}
+        for (i = 0; i < L_FRAME; i++)
+        {
+            exc2[i] = shr(exc[i], Q_new);
+        }
 
-		L_tmp = 0;
-		for (i = 0; i < L_FRAME; i++)
-			L_tmp += (exc2[i] * exc2[i])<<1;
+        L_tmp = 0;
+        for (i = 0; i < L_FRAME; i++)
+            L_tmp += (exc2[i] * exc2[i])<<1;
 
-		L_tmp >>= 1;
+        L_tmp >>= 1;
 
-		dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
+        dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
 
-		/* Quantize and code the ISFs */
-		dtx_enc(st->dtx_encSt, isf, exc2, &prms);
+        /* Quantize and code the ISFs */
+        dtx_enc(st->dtx_encSt, isf, exc2, &prms);
 
-		/* Convert ISFs to the cosine domain */
-		Isf_isp(isf, ispnew_q, M);
-		Isp_Az(ispnew_q, Aq, M, 0);
+        /* Convert ISFs to the cosine domain */
+        Isf_isp(isf, ispnew_q, M);
+        Isp_Az(ispnew_q, Aq, M, 0);
 
-		for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
-		{
-			corr_gain = synthesis(Aq, &exc2[i_subfr], 0, &speech16k[i_subfr * 5 / 4], st);
-		}
-		Copy(isf, st->isfold, M);
+        for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
+        {
+            corr_gain = synthesis(Aq, &exc2[i_subfr], 0, &speech16k[i_subfr * 5 / 4], st);
+        }
+        Copy(isf, st->isfold, M);
 
-		/* reset speech coder memories */
-		Reset_encoder(st, 0);
+        /* reset speech coder memories */
+        Reset_encoder(st, 0);
 
-		/*--------------------------------------------------*
-		 * Update signal for next frame.                    *
-		 * -> save past of speech[] and wsp[].              *
-		 *--------------------------------------------------*/
+        /*--------------------------------------------------*
+         * Update signal for next frame.                    *
+         * -> save past of speech[] and wsp[].              *
+         *--------------------------------------------------*/
 
-		Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
-		Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
+        Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
+        Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
 
-		return;
-	}
-	/*----------------------------------------------------------------------*
-	 *                               ACELP                                  *
-	 *----------------------------------------------------------------------*/
+        return;
+    }
+    /*----------------------------------------------------------------------*
+     *                               ACELP                                  *
+     *----------------------------------------------------------------------*/
 
-	/* Quantize and code the ISFs */
+    /* Quantize and code the ISFs */
 
-	if (*ser_size <= NBBITS_7k)
-	{
-		Qpisf_2s_36b(isf, isf, st->past_isfq, indice, 4);
+    if (*ser_size <= NBBITS_7k)
+    {
+        Qpisf_2s_36b(isf, isf, st->past_isfq, indice, 4);
 
-		Parm_serial(indice[0], 8, &prms);
-		Parm_serial(indice[1], 8, &prms);
-		Parm_serial(indice[2], 7, &prms);
-		Parm_serial(indice[3], 7, &prms);
-		Parm_serial(indice[4], 6, &prms);
-	} else
-	{
-		Qpisf_2s_46b(isf, isf, st->past_isfq, indice, 4);
+        Parm_serial(indice[0], 8, &prms);
+        Parm_serial(indice[1], 8, &prms);
+        Parm_serial(indice[2], 7, &prms);
+        Parm_serial(indice[3], 7, &prms);
+        Parm_serial(indice[4], 6, &prms);
+    } else
+    {
+        Qpisf_2s_46b(isf, isf, st->past_isfq, indice, 4);
 
-		Parm_serial(indice[0], 8, &prms);
-		Parm_serial(indice[1], 8, &prms);
-		Parm_serial(indice[2], 6, &prms);
-		Parm_serial(indice[3], 7, &prms);
-		Parm_serial(indice[4], 7, &prms);
-		Parm_serial(indice[5], 5, &prms);
-		Parm_serial(indice[6], 5, &prms);
-	}
+        Parm_serial(indice[0], 8, &prms);
+        Parm_serial(indice[1], 8, &prms);
+        Parm_serial(indice[2], 6, &prms);
+        Parm_serial(indice[3], 7, &prms);
+        Parm_serial(indice[4], 7, &prms);
+        Parm_serial(indice[5], 5, &prms);
+        Parm_serial(indice[6], 5, &prms);
+    }
 
-	/* Check stability on isf : distance between old isf and current isf */
+    /* Check stability on isf : distance between old isf and current isf */
 
-	L_tmp = 0;
-	for (i = 0; i < M - 1; i++)
-	{
-		tmp = vo_sub(isf[i], st->isfold[i]);
-		L_tmp += (tmp * tmp)<<1;
-	}
+    L_tmp = 0;
+    for (i = 0; i < M - 1; i++)
+    {
+        tmp = vo_sub(isf[i], st->isfold[i]);
+        L_tmp += (tmp * tmp)<<1;
+    }
 
-	tmp = extract_h(L_shl2(L_tmp, 8));
+    tmp = extract_h(L_shl2(L_tmp, 8));
 
-	tmp = vo_mult(tmp, 26214);                /* tmp = L_tmp*0.8/256 */
-	tmp = vo_sub(20480, tmp);                 /* 1.25 - tmp (in Q14) */
+    tmp = vo_mult(tmp, 26214);                /* tmp = L_tmp*0.8/256 */
+    tmp = vo_sub(20480, tmp);                 /* 1.25 - tmp (in Q14) */
 
-	stab_fac = shl(tmp, 1);
+    stab_fac = shl(tmp, 1);
 
-	if (stab_fac < 0)
-	{
-		stab_fac = 0;
-	}
-	Copy(isf, st->isfold, M);
+    if (stab_fac < 0)
+    {
+        stab_fac = 0;
+    }
+    Copy(isf, st->isfold, M);
 
-	/* Convert ISFs to the cosine domain */
-	Isf_isp(isf, ispnew_q, M);
+    /* Convert ISFs to the cosine domain */
+    Isf_isp(isf, ispnew_q, M);
 
-	if (st->first_frame != 0)
-	{
-		st->first_frame = 0;
-		Copy(ispnew_q, st->ispold_q, M);
-	}
-	/* Find the interpolated ISPs and convert to a[] for all subframes */
+    if (st->first_frame != 0)
+    {
+        st->first_frame = 0;
+        Copy(ispnew_q, st->ispold_q, M);
+    }
+    /* Find the interpolated ISPs and convert to a[] for all subframes */
 
-	Int_isp(st->ispold_q, ispnew_q, interpol_frac, Aq);
+    Int_isp(st->ispold_q, ispnew_q, interpol_frac, Aq);
 
-	/* update ispold[] for the next frame */
-	Copy(ispnew_q, st->ispold_q, M);
+    /* update ispold[] for the next frame */
+    Copy(ispnew_q, st->ispold_q, M);
 
-	p_Aq = Aq;
-	for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
-	{
+    p_Aq = Aq;
+    for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
+    {
 #ifdef ASM_OPT               /* asm optimization branch */
-		Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
+        Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
 #else
-		Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
+        Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
 #endif
-		p_Aq += (M + 1);
-	}
+        p_Aq += (M + 1);
+    }
 
-	/* Buffer isf's and energy for dtx on non-speech frame */
-	if (vad_flag == 0)
-	{
-		for (i = 0; i < L_FRAME; i++)
-		{
-			exc2[i] = exc[i] >> Q_new;
-		}
-		L_tmp = 0;
-		for (i = 0; i < L_FRAME; i++)
-			L_tmp += (exc2[i] * exc2[i])<<1;
-		L_tmp >>= 1;
+    /* Buffer isf's and energy for dtx on non-speech frame */
+    if (vad_flag == 0)
+    {
+        for (i = 0; i < L_FRAME; i++)
+        {
+            exc2[i] = exc[i] >> Q_new;
+        }
+        L_tmp = 0;
+        for (i = 0; i < L_FRAME; i++) {
+            Word32 prod = L_mult(exc2[i], exc2[i]); /* was (exc2[i] * exc2[i]) << 1 */
+            L_tmp = L_add(L_tmp, prod);
+        }
+        L_tmp >>= 1;
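+        /* L_mult(a, b) is the saturating basic op for (a * b) << 1, so the
+         * L_add accumulation above cannot hit the signed-overflow UB that the
+         * old raw (exc2[i] * exc2[i]) << 1 expression could. */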
 
-		dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
-	}
-	/* range for closed loop pitch search in 1st subframe */
+        dtx_buffer(st->dtx_encSt, isf, L_tmp, codec_mode);
+    }
+    /* range for closed loop pitch search in 1st subframe */
 
-	T0_min = T_op - 8;
-	if (T0_min < PIT_MIN)
-	{
-		T0_min = PIT_MIN;
-	}
-	T0_max = (T0_min + 15);
+    T0_min = T_op - 8;
+    if (T0_min < PIT_MIN)
+    {
+        T0_min = PIT_MIN;
+    }
+    T0_max = (T0_min + 15);
 
-	if(T0_max > PIT_MAX)
-	{
-		T0_max = PIT_MAX;
-		T0_min = T0_max - 15;
-	}
-	/*------------------------------------------------------------------------*
-	 *          Loop for every subframe in the analysis frame                 *
-	 *------------------------------------------------------------------------*
-	 *  To find the pitch and innovation parameters. The subframe size is     *
-	 *  L_SUBFR and the loop is repeated L_FRAME/L_SUBFR times.               *
-	 *     - compute the target signal for pitch search                       *
-	 *     - compute impulse response of weighted synthesis filter (h1[])     *
-	 *     - find the closed-loop pitch parameters                            *
-	 *     - encode the pitch dealy                                           *
-	 *     - find 2 lt prediction (with / without LP filter for lt pred)      *
-	 *     - find 2 pitch gains and choose the best lt prediction.            *
-	 *     - find target vector for codebook search                           *
-	 *     - update the impulse response h1[] for codebook search             *
-	 *     - correlation between target vector and impulse response           *
-	 *     - codebook search and encoding                                     *
-	 *     - VQ of pitch and codebook gains                                   *
-	 *     - find voicing factor and tilt of code for next subframe.          *
-	 *     - update states of weighting filter                                *
-	 *     - find excitation and synthesis speech                             *
-	 *------------------------------------------------------------------------*/
-	p_A = A;
-	p_Aq = Aq;
-	for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
-	{
-		pit_flag = i_subfr;
-		if ((i_subfr == 2 * L_SUBFR) && (*ser_size > NBBITS_7k))
-		{
-			pit_flag = 0;
-			/* range for closed loop pitch search in 3rd subframe */
-			T0_min = (T_op2 - 8);
+    if(T0_max > PIT_MAX)
+    {
+        T0_max = PIT_MAX;
+        T0_min = T0_max - 15;
+    }
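+    /* The closed-loop search window [T0_min, T0_max] is 16 lags wide, centred
+     * on the open-loop lag T_op and clipped to [PIT_MIN, PIT_MAX]. */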
+    /*------------------------------------------------------------------------*
+     *          Loop for every subframe in the analysis frame                 *
+     *------------------------------------------------------------------------*
+     *  To find the pitch and innovation parameters. The subframe size is     *
+     *  L_SUBFR and the loop is repeated L_FRAME/L_SUBFR times.               *
+     *     - compute the target signal for pitch search                       *
+     *     - compute impulse response of weighted synthesis filter (h1[])     *
+     *     - find the closed-loop pitch parameters                            *
+     *     - encode the pitch delay                                           *
+     *     - find 2 lt prediction (with / without LP filter for lt pred)      *
+     *     - find 2 pitch gains and choose the best lt prediction.            *
+     *     - find target vector for codebook search                           *
+     *     - update the impulse response h1[] for codebook search             *
+     *     - correlation between target vector and impulse response           *
+     *     - codebook search and encoding                                     *
+     *     - VQ of pitch and codebook gains                                   *
+     *     - find voicing factor and tilt of code for next subframe.          *
+     *     - update states of weighting filter                                *
+     *     - find excitation and synthesis speech                             *
+     *------------------------------------------------------------------------*/
+    p_A = A;
+    p_Aq = Aq;
+    for (i_subfr = 0; i_subfr < L_FRAME; i_subfr += L_SUBFR)
+    {
+        pit_flag = i_subfr;
+        if ((i_subfr == 2 * L_SUBFR) && (*ser_size > NBBITS_7k))
+        {
+            pit_flag = 0;
+            /* range for closed loop pitch search in 3rd subframe */
+            T0_min = (T_op2 - 8);
 
-			if (T0_min < PIT_MIN)
-			{
-				T0_min = PIT_MIN;
-			}
-			T0_max = (T0_min + 15);
-			if (T0_max > PIT_MAX)
-			{
-				T0_max = PIT_MAX;
-				T0_min = (T0_max - 15);
-			}
-		}
-		/*-----------------------------------------------------------------------*
-		 *                                                                       *
-		 *        Find the target vector for pitch search:                       *
-		 *        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~                        *
-		 *                                                                       *
-		 *             |------|  res[n]                                          *
-		 * speech[n]---| A(z) |--------                                          *
-		 *             |------|       |   |--------| error[n]  |------|          *
-		 *                   zero -- (-)--| 1/A(z) |-----------| W(z) |-- target *
-		 *                   exc          |--------|           |------|          *
-		 *                                                                       *
-		 * Instead of subtracting the zero-input response of filters from        *
-		 * the weighted input speech, the above configuration is used to         *
-		 * compute the target vector.                                            *
-		 *                                                                       *
-		 *-----------------------------------------------------------------------*/
+            if (T0_min < PIT_MIN)
+            {
+                T0_min = PIT_MIN;
+            }
+            T0_max = (T0_min + 15);
+            if (T0_max > PIT_MAX)
+            {
+                T0_max = PIT_MAX;
+                T0_min = (T0_max - 15);
+            }
+        }
+        /*-----------------------------------------------------------------------*
+         *                                                                       *
+         *        Find the target vector for pitch search:                       *
+         *        ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~                        *
+         *                                                                       *
+         *             |------|  res[n]                                          *
+         * speech[n]---| A(z) |--------                                          *
+         *             |------|       |   |--------| error[n]  |------|          *
+         *                   zero -- (-)--| 1/A(z) |-----------| W(z) |-- target *
+         *                   exc          |--------|           |------|          *
+         *                                                                       *
+         * Instead of subtracting the zero-input response of filters from        *
+         * the weighted input speech, the above configuration is used to         *
+         * compute the target vector.                                            *
+         *                                                                       *
+         *-----------------------------------------------------------------------*/
 
-		for (i = 0; i < M; i++)
-		{
-			error[i] = vo_sub(speech[i + i_subfr - M], st->mem_syn[i]);
-		}
+        for (i = 0; i < M; i++)
+        {
+            error[i] = vo_sub(speech[i + i_subfr - M], st->mem_syn[i]);
+        }
 
 #ifdef ASM_OPT              /* asm optimization branch */
-		Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
+        Residu_opt(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
 #else
-		Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
+        Residu(p_Aq, &speech[i_subfr], &exc[i_subfr], L_SUBFR);
 #endif
-		Syn_filt(p_Aq, &exc[i_subfr], error + M, L_SUBFR, error, 0);
-		Weight_a(p_A, Ap, GAMMA1, M);
+        Syn_filt(p_Aq, &exc[i_subfr], error + M, L_SUBFR, error, 0);
+        Weight_a(p_A, Ap, GAMMA1, M);
 
 #ifdef ASM_OPT             /* asm optimization branch */
-		Residu_opt(Ap, error + M, xn, L_SUBFR);
+        Residu_opt(Ap, error + M, xn, L_SUBFR);
 #else
-		Residu(Ap, error + M, xn, L_SUBFR);
+        Residu(Ap, error + M, xn, L_SUBFR);
 #endif
-		Deemph2(xn, TILT_FAC, L_SUBFR, &(st->mem_w0));
+        Deemph2(xn, TILT_FAC, L_SUBFR, &(st->mem_w0));
 
-		/*----------------------------------------------------------------------*
-		 * Find approx. target in residual domain "cn[]" for inovation search.  *
-		 *----------------------------------------------------------------------*/
-		/* first half: xn[] --> cn[] */
-		Set_zero(code, M);
-		Copy(xn, code + M, L_SUBFR / 2);
-		tmp = 0;
-		Preemph2(code + M, TILT_FAC, L_SUBFR / 2, &tmp);
-		Weight_a(p_A, Ap, GAMMA1, M);
-		Syn_filt(Ap,code + M, code + M, L_SUBFR / 2, code, 0);
+        /*----------------------------------------------------------------------*
+         * Find approx. target in residual domain "cn[]" for innovation search. *
+         *----------------------------------------------------------------------*/
+        /* first half: xn[] --> cn[] */
+        Set_zero(code, M);
+        Copy(xn, code + M, L_SUBFR / 2);
+        tmp = 0;
+        Preemph2(code + M, TILT_FAC, L_SUBFR / 2, &tmp);
+        Weight_a(p_A, Ap, GAMMA1, M);
+        Syn_filt(Ap, code + M, code + M, L_SUBFR / 2, code, 0);
 
 #ifdef ASM_OPT                /* asm optimization branch */
-		Residu_opt(p_Aq,code + M, cn, L_SUBFR / 2);
+        Residu_opt(p_Aq, code + M, cn, L_SUBFR / 2);
 #else
-		Residu(p_Aq,code + M, cn, L_SUBFR / 2);
+        Residu(p_Aq, code + M, cn, L_SUBFR / 2);
 #endif
 
-		/* second half: res[] --> cn[] (approximated and faster) */
-		Copy(&exc[i_subfr + (L_SUBFR / 2)], cn + (L_SUBFR / 2), L_SUBFR / 2);
+        /* second half: res[] --> cn[] (approximated and faster) */
+        Copy(&exc[i_subfr + (L_SUBFR / 2)], cn + (L_SUBFR / 2), L_SUBFR / 2);
 
-		/*---------------------------------------------------------------*
-		 * Compute impulse response, h1[], of weighted synthesis filter  *
-		 *---------------------------------------------------------------*/
+        /*---------------------------------------------------------------*
+         * Compute impulse response, h1[], of weighted synthesis filter  *
+         *---------------------------------------------------------------*/
 
-		Set_zero(error, M + L_SUBFR);
-		Weight_a(p_A, error + M, GAMMA1, M);
+        Set_zero(error, M + L_SUBFR);
+        Weight_a(p_A, error + M, GAMMA1, M);
 
-		vo_p0 = error+M;
-		vo_p3 = h1;
-		for (i = 0; i < L_SUBFR; i++)
-		{
-			L_tmp = *vo_p0 << 14;        /* x4 (Q12 to Q14) */
-			vo_p1 = p_Aq + 1;
-			vo_p2 = vo_p0-1;
-			for (j = 1; j <= M/4; j++)
-			{
-				L_tmp -= *vo_p1++ * *vo_p2--;
-				L_tmp -= *vo_p1++ * *vo_p2--;
-				L_tmp -= *vo_p1++ * *vo_p2--;
-				L_tmp -= *vo_p1++ * *vo_p2--;
-			}
-			*vo_p3++ = *vo_p0++ = vo_round((L_tmp <<4));
-		}
-		/* deemph without division by 2 -> Q14 to Q15 */
-		tmp = 0;
-		Deemph2(h1, TILT_FAC, L_SUBFR, &tmp);   /* h1 in Q14 */
+        vo_p0 = error+M;
+        vo_p3 = h1;
+        for (i = 0; i < L_SUBFR; i++)
+        {
+            L_tmp = *vo_p0 << 14;        /* x4 (Q12 to Q14) */
+            vo_p1 = p_Aq + 1;
+            vo_p2 = vo_p0-1;
+            for (j = 1; j <= M/4; j++)
+            {
+                L_tmp = L_sub(L_tmp, *vo_p1++ * *vo_p2--);
+                L_tmp = L_sub(L_tmp, *vo_p1++ * *vo_p2--);
+                L_tmp = L_sub(L_tmp, *vo_p1++ * *vo_p2--);
+                L_tmp = L_sub(L_tmp, *vo_p1++ * *vo_p2--);
+            }
+            *vo_p3++ = *vo_p0++ = vo_round((L_tmp <<4));
+        }
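+        /* The inner loop is unrolled 4x over the M = 16 LP taps: each h1[i]
+         * is one sample of the weighted coefficients filtered through 1/Aq(z),
+         * with L_sub saturating where the old raw subtraction could wrap. */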
+        /* deemph without division by 2 -> Q14 to Q15 */
+        tmp = 0;
+        Deemph2(h1, TILT_FAC, L_SUBFR, &tmp);   /* h1 in Q14 */
 
-		/* h2 in Q12 for codebook search */
-		Copy(h1, h2, L_SUBFR);
+        /* h2 in Q12 for codebook search */
+        Copy(h1, h2, L_SUBFR);
 
-		/*---------------------------------------------------------------*
-		 * scale xn[] and h1[] to avoid overflow in dot_product12()      *
-		 *---------------------------------------------------------------*/
+        /*---------------------------------------------------------------*
+         * scale xn[] and h1[] to avoid overflow in dot_product12()      *
+         *---------------------------------------------------------------*/
 #ifdef  ASM_OPT                  /* asm optimization branch */
-		Scale_sig_opt(h2, L_SUBFR, -2);
-		Scale_sig_opt(xn, L_SUBFR, shift);     /* scaling of xn[] to limit dynamic at 12 bits */
-		Scale_sig_opt(h1, L_SUBFR, 1 + shift);  /* set h1[] in Q15 with scaling for convolution */
+        Scale_sig_opt(h2, L_SUBFR, -2);
+        Scale_sig_opt(xn, L_SUBFR, shift);     /* scaling of xn[] to limit dynamic at 12 bits */
+        Scale_sig_opt(h1, L_SUBFR, 1 + shift);  /* set h1[] in Q15 with scaling for convolution */
 #else
-		Scale_sig(h2, L_SUBFR, -2);
-		Scale_sig(xn, L_SUBFR, shift);     /* scaling of xn[] to limit dynamic at 12 bits */
-		Scale_sig(h1, L_SUBFR, 1 + shift);  /* set h1[] in Q15 with scaling for convolution */
+        Scale_sig(h2, L_SUBFR, -2);
+        Scale_sig(xn, L_SUBFR, shift);     /* scaling of xn[] to limit dynamic at 12 bits */
+        Scale_sig(h1, L_SUBFR, 1 + shift);  /* set h1[] in Q15 with scaling for convolution */
 #endif
-		/*----------------------------------------------------------------------*
-		 *                 Closed-loop fractional pitch search                  *
-		 *----------------------------------------------------------------------*/
-		/* find closed loop fractional pitch  lag */
-		if(*ser_size <= NBBITS_9k)
-		{
-			T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
-					pit_flag, PIT_MIN, PIT_FR1_8b, L_SUBFR);
+        /*----------------------------------------------------------------------*
+         *                 Closed-loop fractional pitch search                  *
+         *----------------------------------------------------------------------*/
+        /* find closed loop fractional pitch  lag */
+        if(*ser_size <= NBBITS_9k)
+        {
+            T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
+                    pit_flag, PIT_MIN, PIT_FR1_8b, L_SUBFR);
 
-			/* encode pitch lag */
-			if (pit_flag == 0)             /* if 1st/3rd subframe */
-			{
-				/*--------------------------------------------------------------*
-				 * The pitch range for the 1st/3rd subframe is encoded with     *
-				 * 8 bits and is divided as follows:                            *
-				 *   PIT_MIN to PIT_FR1-1  resolution 1/2 (frac = 0 or 2)       *
-				 *   PIT_FR1 to PIT_MAX    resolution 1   (frac = 0)            *
-				 *--------------------------------------------------------------*/
-				if (T0 < PIT_FR1_8b)
-				{
-					index = ((T0 << 1) + (T0_frac >> 1) - (PIT_MIN<<1));
-				} else
-				{
-					index = ((T0 - PIT_FR1_8b) + ((PIT_FR1_8b - PIT_MIN)*2));
-				}
+            /* encode pitch lag */
+            if (pit_flag == 0)             /* if 1st/3rd subframe */
+            {
+                /*--------------------------------------------------------------*
+                 * The pitch range for the 1st/3rd subframe is encoded with     *
+                 * 8 bits and is divided as follows:                            *
+                 *   PIT_MIN to PIT_FR1-1  resolution 1/2 (frac = 0 or 2)       *
+                 *   PIT_FR1 to PIT_MAX    resolution 1   (frac = 0)            *
+                 *--------------------------------------------------------------*/
+                if (T0 < PIT_FR1_8b)
+                {
+                    index = ((T0 << 1) + (T0_frac >> 1) - (PIT_MIN<<1));
+                } else
+                {
+                    index = ((T0 - PIT_FR1_8b) + ((PIT_FR1_8b - PIT_MIN)*2));
+                }
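+                /* Worked example, assuming the usual AMR-WB lag limits
+                 * PIT_MIN = 34, PIT_FR1_8b = 92, PIT_MAX = 231: the two ranges
+                 * give (92 - 34) * 2 + (231 - 92 + 1) = 256 codes, i.e.
+                 * exactly the 8 bits serialized below. */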
 
-				Parm_serial(index, 8, &prms);
+                Parm_serial(index, 8, &prms);
 
-				/* find T0_min and T0_max for subframe 2 and 4 */
-				T0_min = (T0 - 8);
-				if (T0_min < PIT_MIN)
-				{
-					T0_min = PIT_MIN;
-				}
-				T0_max = T0_min + 15;
-				if (T0_max > PIT_MAX)
-				{
-					T0_max = PIT_MAX;
-					T0_min = (T0_max - 15);
-				}
-			} else
-			{                              /* if subframe 2 or 4 */
-				/*--------------------------------------------------------------*
-				 * The pitch range for subframe 2 or 4 is encoded with 5 bits:  *
-				 *   T0_min  to T0_max     resolution 1/2 (frac = 0 or 2)       *
-				 *--------------------------------------------------------------*/
-				i = (T0 - T0_min);
-				index = (i << 1) + (T0_frac >> 1);
+                /* find T0_min and T0_max for subframe 2 and 4 */
+                T0_min = (T0 - 8);
+                if (T0_min < PIT_MIN)
+                {
+                    T0_min = PIT_MIN;
+                }
+                T0_max = T0_min + 15;
+                if (T0_max > PIT_MAX)
+                {
+                    T0_max = PIT_MAX;
+                    T0_min = (T0_max - 15);
+                }
+            } else
+            {                              /* if subframe 2 or 4 */
+                /*--------------------------------------------------------------*
+                 * The pitch range for subframe 2 or 4 is encoded with 5 bits:  *
+                 *   T0_min  to T0_max     resolution 1/2 (frac = 0 or 2)       *
+                 *--------------------------------------------------------------*/
+                i = (T0 - T0_min);
+                index = (i << 1) + (T0_frac >> 1);
 
-				Parm_serial(index, 5, &prms);
-			}
-		} else
-		{
-			T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
-					pit_flag, PIT_FR2, PIT_FR1_9b, L_SUBFR);
+                Parm_serial(index, 5, &prms);
+            }
+        } else
+        {
+            T0 = Pitch_fr4(&exc[i_subfr], xn, h1, T0_min, T0_max, &T0_frac,
+                    pit_flag, PIT_FR2, PIT_FR1_9b, L_SUBFR);
 
-			/* encode pitch lag */
-			if (pit_flag == 0)             /* if 1st/3rd subframe */
-			{
-				/*--------------------------------------------------------------*
-				 * The pitch range for the 1st/3rd subframe is encoded with     *
-				 * 9 bits and is divided as follows:                            *
-				 *   PIT_MIN to PIT_FR2-1  resolution 1/4 (frac = 0,1,2 or 3)   *
-				 *   PIT_FR2 to PIT_FR1-1  resolution 1/2 (frac = 0 or 1)       *
-				 *   PIT_FR1 to PIT_MAX    resolution 1   (frac = 0)            *
-				 *--------------------------------------------------------------*/
+            /* encode pitch lag */
+            if (pit_flag == 0)             /* if 1st/3rd subframe */
+            {
+                /*--------------------------------------------------------------*
+                 * The pitch range for the 1st/3rd subframe is encoded with     *
+                 * 9 bits and is divided as follows:                            *
+                 *   PIT_MIN to PIT_FR2-1  resolution 1/4 (frac = 0,1,2 or 3)   *
+                 *   PIT_FR2 to PIT_FR1-1  resolution 1/2 (frac = 0 or 1)       *
+                 *   PIT_FR1 to PIT_MAX    resolution 1   (frac = 0)            *
+                 *--------------------------------------------------------------*/
 
-				if (T0 < PIT_FR2)
-				{
-					index = ((T0 << 2) + T0_frac) - (PIT_MIN << 2);
-				} else if(T0 < PIT_FR1_9b)
-				{
-					index = ((((T0 << 1) + (T0_frac >> 1)) - (PIT_FR2<<1)) + ((PIT_FR2 - PIT_MIN)<<2));
-				} else
-				{
-					index = (((T0 - PIT_FR1_9b) + ((PIT_FR2 - PIT_MIN)<<2)) + ((PIT_FR1_9b - PIT_FR2)<<1));
-				}
+                if (T0 < PIT_FR2)
+                {
+                    index = ((T0 << 2) + T0_frac) - (PIT_MIN << 2);
+                } else if(T0 < PIT_FR1_9b)
+                {
+                    index = ((((T0 << 1) + (T0_frac >> 1)) - (PIT_FR2<<1)) + ((PIT_FR2 - PIT_MIN)<<2));
+                } else
+                {
+                    index = (((T0 - PIT_FR1_9b) + ((PIT_FR2 - PIT_MIN)<<2)) + ((PIT_FR1_9b - PIT_FR2)<<1));
+                }
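+                /* With the same assumed limits plus PIT_FR2 = 128 and
+                 * PIT_FR1_9b = 160: (128 - 34) * 4 + (160 - 128) * 2 +
+                 * (231 - 160 + 1) = 512 codes, filling the 9 bits below. */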
 
-				Parm_serial(index, 9, &prms);
+                Parm_serial(index, 9, &prms);
 
-				/* find T0_min and T0_max for subframe 2 and 4 */
+                /* find T0_min and T0_max for subframe 2 and 4 */
 
-				T0_min = (T0 - 8);
-				if (T0_min < PIT_MIN)
-				{
-					T0_min = PIT_MIN;
-				}
-				T0_max = T0_min + 15;
+                T0_min = (T0 - 8);
+                if (T0_min < PIT_MIN)
+                {
+                    T0_min = PIT_MIN;
+                }
+                T0_max = T0_min + 15;
 
-				if (T0_max > PIT_MAX)
-				{
-					T0_max = PIT_MAX;
-					T0_min = (T0_max - 15);
-				}
-			} else
-			{                              /* if subframe 2 or 4 */
-				/*--------------------------------------------------------------*
-				 * The pitch range for subframe 2 or 4 is encoded with 6 bits:  *
-				 *   T0_min  to T0_max     resolution 1/4 (frac = 0,1,2 or 3)   *
-				 *--------------------------------------------------------------*/
-				i = (T0 - T0_min);
-				index = (i << 2) + T0_frac;
-				Parm_serial(index, 6, &prms);
-			}
-		}
+                if (T0_max > PIT_MAX)
+                {
+                    T0_max = PIT_MAX;
+                    T0_min = (T0_max - 15);
+                }
+            } else
+            {                              /* if subframe 2 or 4 */
+                /*--------------------------------------------------------------*
+                 * The pitch range for subframe 2 or 4 is encoded with 6 bits:  *
+                 *   T0_min  to T0_max     resolution 1/4 (frac = 0,1,2 or 3)   *
+                 *--------------------------------------------------------------*/
+                i = (T0 - T0_min);
+                index = (i << 2) + T0_frac;
+                Parm_serial(index, 6, &prms);
+            }
+        }
 
-		/*-----------------------------------------------------------------*
-		 * Gain clipping test to avoid unstable synthesis on frame erasure *
-		 *-----------------------------------------------------------------*/
+        /*-----------------------------------------------------------------*
+         * Gain clipping test to avoid unstable synthesis on frame erasure *
+         *-----------------------------------------------------------------*/
 
-		clip_gain = 0;
-		if((st->gp_clip[0] < 154) && (st->gp_clip[1] > 14746))
-			clip_gain = 1;
+        clip_gain = 0;
+        if((st->gp_clip[0] < 154) && (st->gp_clip[1] > 14746))
+            clip_gain = 1;
 
-		/*-----------------------------------------------------------------*
-		 * - find unity gain pitch excitation (adaptive codebook entry)    *
-		 *   with fractional interpolation.                                *
-		 * - find filtered pitch exc. y1[]=exc[] convolved with h1[])      *
-		 * - compute pitch gain1                                           *
-		 *-----------------------------------------------------------------*/
-		/* find pitch exitation */
+        /*-----------------------------------------------------------------*
+         * - find unity gain pitch excitation (adaptive codebook entry)    *
+         *   with fractional interpolation.                                *
+         * - find filtered pitch exc. y1[]=exc[] convolved with h1[]       *
+         * - compute pitch gain1                                           *
+         *-----------------------------------------------------------------*/
+        /* find pitch excitation */
 #ifdef ASM_OPT                  /* asm optimization branch */
-		pred_lt4_asm(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
+        pred_lt4_asm(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
 #else
-		Pred_lt4(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
+        Pred_lt4(&exc[i_subfr], T0, T0_frac, L_SUBFR + 1);
 #endif
-		if (*ser_size > NBBITS_9k)
-		{
+        if (*ser_size > NBBITS_9k)
+        {
 #ifdef ASM_OPT                   /* asm optimization branch */
-			Convolve_asm(&exc[i_subfr], h1, y1, L_SUBFR);
+            Convolve_asm(&exc[i_subfr], h1, y1, L_SUBFR);
 #else
-			Convolve(&exc[i_subfr], h1, y1, L_SUBFR);
+            Convolve(&exc[i_subfr], h1, y1, L_SUBFR);
 #endif
-			gain1 = G_pitch(xn, y1, g_coeff, L_SUBFR);
-			/* clip gain if necessary to avoid problem at decoder */
-			if ((clip_gain != 0) && (gain1 > GP_CLIP))
-			{
-				gain1 = GP_CLIP;
-			}
-			/* find energy of new target xn2[] */
-			Updt_tar(xn, dn, y1, gain1, L_SUBFR);       /* dn used temporary */
-		} else
-		{
-			gain1 = 0;
-		}
-		/*-----------------------------------------------------------------*
-		 * - find pitch excitation filtered by 1st order LP filter.        *
-		 * - find filtered pitch exc. y2[]=exc[] convolved with h1[])      *
-		 * - compute pitch gain2                                           *
-		 *-----------------------------------------------------------------*/
-		/* find pitch excitation with lp filter */
-		vo_p0 = exc + i_subfr-1;
-		vo_p1 = code;
-		/* find pitch excitation with lp filter */
-		for (i = 0; i < L_SUBFR/2; i++)
-		{
-			L_tmp = 5898 * *vo_p0++;
-			L_tmp1 = 5898 * *vo_p0;
-			L_tmp += 20972 * *vo_p0++;
-			L_tmp1 += 20972 * *vo_p0++;
-			L_tmp1 += 5898 * *vo_p0--;
-			L_tmp += 5898 * *vo_p0;
-			*vo_p1++ = (L_tmp + 0x4000)>>15;
-			*vo_p1++ = (L_tmp1 + 0x4000)>>15;
-		}
+            gain1 = G_pitch(xn, y1, g_coeff, L_SUBFR);
+            /* clip gain if necessary to avoid problem at decoder */
+            if ((clip_gain != 0) && (gain1 > GP_CLIP))
+            {
+                gain1 = GP_CLIP;
+            }
+            /* find energy of new target xn2[] */
+            Updt_tar(xn, dn, y1, gain1, L_SUBFR);       /* dn used temporarily */
+        } else
+        {
+            gain1 = 0;
+        }
+        /*-----------------------------------------------------------------*
+         * - find pitch excitation filtered by 1st order LP filter.        *
+         * - find filtered pitch exc. y2[]=exc[] convolved with h1[]       *
+         * - compute pitch gain2                                           *
+         *-----------------------------------------------------------------*/
+        /* find pitch excitation with lp filter */
+        vo_p0 = exc + i_subfr-1;
+        vo_p1 = code;
+        for (i = 0; i < L_SUBFR/2; i++)
+        {
+            L_tmp = 5898 * *vo_p0++;
+            L_tmp1 = 5898 * *vo_p0;
+            L_tmp += 20972 * *vo_p0++;
+            L_tmp1 += 20972 * *vo_p0++;
+            L_tmp1 += 5898 * *vo_p0--;
+            L_tmp += 5898 * *vo_p0;
+            *vo_p1++ = (L_tmp + 0x4000)>>15;
+            *vo_p1++ = (L_tmp1 + 0x4000)>>15;
+        }
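+        /* The loop above runs the 3-tap low-pass b = {0.18, 0.64, 0.18} in
+         * Q15 (5898 + 20972 + 5898 = 32768) over the past excitation,
+         * producing two rounded output samples per iteration (+0x4000). */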
 
 #ifdef ASM_OPT                 /* asm optimization branch */
-		Convolve_asm(code, h1, y2, L_SUBFR);
+        Convolve_asm(code, h1, y2, L_SUBFR);
 #else
-		Convolve(code, h1, y2, L_SUBFR);
+        Convolve(code, h1, y2, L_SUBFR);
 #endif
 
-		gain2 = G_pitch(xn, y2, g_coeff2, L_SUBFR);
+        gain2 = G_pitch(xn, y2, g_coeff2, L_SUBFR);
 
-		/* clip gain if necessary to avoid problem at decoder */
-		if ((clip_gain != 0) && (gain2 > GP_CLIP))
-		{
-			gain2 = GP_CLIP;
-		}
-		/* find energy of new target xn2[] */
-		Updt_tar(xn, xn2, y2, gain2, L_SUBFR);
-		/*-----------------------------------------------------------------*
-		 * use the best prediction (minimise quadratic error).             *
-		 *-----------------------------------------------------------------*/
-		select = 0;
-		if(*ser_size > NBBITS_9k)
-		{
-			L_tmp = 0L;
-			vo_p0 = dn;
-			vo_p1 = xn2;
-			for (i = 0; i < L_SUBFR/2; i++)
-			{
-				L_tmp += *vo_p0 * *vo_p0;
-				vo_p0++;
-				L_tmp -= *vo_p1 * *vo_p1;
-				vo_p1++;
-				L_tmp += *vo_p0 * *vo_p0;
-				vo_p0++;
-				L_tmp -= *vo_p1 * *vo_p1;
-				vo_p1++;
-			}
+        /* clip gain if necessary to avoid problem at decoder */
+        if ((clip_gain != 0) && (gain2 > GP_CLIP))
+        {
+            gain2 = GP_CLIP;
+        }
+        /* find energy of new target xn2[] */
+        Updt_tar(xn, xn2, y2, gain2, L_SUBFR);
+        /*-----------------------------------------------------------------*
+         * use the best prediction (minimise quadratic error).             *
+         *-----------------------------------------------------------------*/
+        select = 0;
+        if(*ser_size > NBBITS_9k)
+        {
+            L_tmp = 0L;
+            vo_p0 = dn;
+            vo_p1 = xn2;
+            for (i = 0; i < L_SUBFR/2; i++)
+            {
+                L_tmp = L_add(L_tmp, *vo_p0 * *vo_p0);
+                vo_p0++;
+                L_tmp = L_sub(L_tmp, *vo_p1 * *vo_p1);
+                vo_p1++;
+                L_tmp = L_add(L_tmp, *vo_p0 * *vo_p0);
+                vo_p0++;
+                L_tmp = L_sub(L_tmp, *vo_p1 * *vo_p1);
+                vo_p1++;
+            }
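+            /* L_tmp = E(dn) - E(xn2): residual energy without the LP filter
+             * minus residual energy with it; select = 1 keeps the unfiltered
+             * prediction when it leaves no more energy behind. */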
 
-			if (L_tmp <= 0)
-			{
-				select = 1;
-			}
-			Parm_serial(select, 1, &prms);
-		}
-		if (select == 0)
-		{
-			/* use the lp filter for pitch excitation prediction */
-			gain_pit = gain2;
-			Copy(code, &exc[i_subfr], L_SUBFR);
-			Copy(y2, y1, L_SUBFR);
-			Copy(g_coeff2, g_coeff, 4);
-		} else
-		{
-			/* no filter used for pitch excitation prediction */
-			gain_pit = gain1;
-			Copy(dn, xn2, L_SUBFR);        /* target vector for codebook search */
-		}
-		/*-----------------------------------------------------------------*
-		 * - update cn[] for codebook search                               *
-		 *-----------------------------------------------------------------*/
-		Updt_tar(cn, cn, &exc[i_subfr], gain_pit, L_SUBFR);
+            if (L_tmp <= 0)
+            {
+                select = 1;
+            }
+            Parm_serial(select, 1, &prms);
+        }
+        if (select == 0)
+        {
+            /* use the lp filter for pitch excitation prediction */
+            gain_pit = gain2;
+            Copy(code, &exc[i_subfr], L_SUBFR);
+            Copy(y2, y1, L_SUBFR);
+            Copy(g_coeff2, g_coeff, 4);
+        } else
+        {
+            /* no filter used for pitch excitation prediction */
+            gain_pit = gain1;
+            Copy(dn, xn2, L_SUBFR);        /* target vector for codebook search */
+        }
+        /*-----------------------------------------------------------------*
+         * - update cn[] for codebook search                               *
+         *-----------------------------------------------------------------*/
+        Updt_tar(cn, cn, &exc[i_subfr], gain_pit, L_SUBFR);
 
 #ifdef  ASM_OPT                           /* asm optimization branch */
-		Scale_sig_opt(cn, L_SUBFR, shift);     /* scaling of cn[] to limit dynamic at 12 bits */
+        Scale_sig_opt(cn, L_SUBFR, shift);     /* scaling of cn[] to limit dynamic at 12 bits */
 #else
-		Scale_sig(cn, L_SUBFR, shift);     /* scaling of cn[] to limit dynamic at 12 bits */
+        Scale_sig(cn, L_SUBFR, shift);     /* scaling of cn[] to limit dynamic at 12 bits */
 #endif
-		/*-----------------------------------------------------------------*
-		 * - include fixed-gain pitch contribution into impulse resp. h1[] *
-		 *-----------------------------------------------------------------*/
-		tmp = 0;
-		Preemph(h2, st->tilt_code, L_SUBFR, &tmp);
+        /*-----------------------------------------------------------------*
+         * - include fixed-gain pitch contribution into impulse resp. h1[] *
+         *-----------------------------------------------------------------*/
+        tmp = 0;
+        Preemph(h2, st->tilt_code, L_SUBFR, &tmp);
 
-		if (T0_frac > 2)
-			T0 = (T0 + 1);
-		Pit_shrp(h2, T0, PIT_SHARP, L_SUBFR);
-		/*-----------------------------------------------------------------*
-		 * - Correlation between target xn2[] and impulse response h1[]    *
-		 * - Innovative codebook search                                    *
-		 *-----------------------------------------------------------------*/
-		cor_h_x(h2, xn2, dn);
-		if (*ser_size <= NBBITS_7k)
-		{
-			ACELP_2t64_fx(dn, cn, h2, code, y2, indice);
+        if (T0_frac > 2)
+            T0 = (T0 + 1);
+        Pit_shrp(h2, T0, PIT_SHARP, L_SUBFR);
+        /*-----------------------------------------------------------------*
+         * - Correlation between target xn2[] and impulse response h1[]    *
+         * - Innovative codebook search                                    *
+         *-----------------------------------------------------------------*/
+        cor_h_x(h2, xn2, dn);
+        if (*ser_size <= NBBITS_7k)
+        {
+            ACELP_2t64_fx(dn, cn, h2, code, y2, indice);
 
-			Parm_serial(indice[0], 12, &prms);
-		} else if(*ser_size <= NBBITS_9k)
-		{
-			ACELP_4t64_fx(dn, cn, h2, code, y2, 20, *ser_size, indice);
+            Parm_serial(indice[0], 12, &prms);
+        } else if(*ser_size <= NBBITS_9k)
+        {
+            ACELP_4t64_fx(dn, cn, h2, code, y2, 20, *ser_size, indice);
 
-			Parm_serial(indice[0], 5, &prms);
-			Parm_serial(indice[1], 5, &prms);
-			Parm_serial(indice[2], 5, &prms);
-			Parm_serial(indice[3], 5, &prms);
-		} else if(*ser_size <= NBBITS_12k)
-		{
-			ACELP_4t64_fx(dn, cn, h2, code, y2, 36, *ser_size, indice);
+            Parm_serial(indice[0], 5, &prms);
+            Parm_serial(indice[1], 5, &prms);
+            Parm_serial(indice[2], 5, &prms);
+            Parm_serial(indice[3], 5, &prms);
+        } else if(*ser_size <= NBBITS_12k)
+        {
+            ACELP_4t64_fx(dn, cn, h2, code, y2, 36, *ser_size, indice);
 
-			Parm_serial(indice[0], 9, &prms);
-			Parm_serial(indice[1], 9, &prms);
-			Parm_serial(indice[2], 9, &prms);
-			Parm_serial(indice[3], 9, &prms);
-		} else if(*ser_size <= NBBITS_14k)
-		{
-			ACELP_4t64_fx(dn, cn, h2, code, y2, 44, *ser_size, indice);
+            Parm_serial(indice[0], 9, &prms);
+            Parm_serial(indice[1], 9, &prms);
+            Parm_serial(indice[2], 9, &prms);
+            Parm_serial(indice[3], 9, &prms);
+        } else if(*ser_size <= NBBITS_14k)
+        {
+            ACELP_4t64_fx(dn, cn, h2, code, y2, 44, *ser_size, indice);
 
-			Parm_serial(indice[0], 13, &prms);
-			Parm_serial(indice[1], 13, &prms);
-			Parm_serial(indice[2], 9, &prms);
-			Parm_serial(indice[3], 9, &prms);
-		} else if(*ser_size <= NBBITS_16k)
-		{
-			ACELP_4t64_fx(dn, cn, h2, code, y2, 52, *ser_size, indice);
+            Parm_serial(indice[0], 13, &prms);
+            Parm_serial(indice[1], 13, &prms);
+            Parm_serial(indice[2], 9, &prms);
+            Parm_serial(indice[3], 9, &prms);
+        } else if(*ser_size <= NBBITS_16k)
+        {
+            ACELP_4t64_fx(dn, cn, h2, code, y2, 52, *ser_size, indice);
 
-			Parm_serial(indice[0], 13, &prms);
-			Parm_serial(indice[1], 13, &prms);
-			Parm_serial(indice[2], 13, &prms);
-			Parm_serial(indice[3], 13, &prms);
-		} else if(*ser_size <= NBBITS_18k)
-		{
-			ACELP_4t64_fx(dn, cn, h2, code, y2, 64, *ser_size, indice);
+            Parm_serial(indice[0], 13, &prms);
+            Parm_serial(indice[1], 13, &prms);
+            Parm_serial(indice[2], 13, &prms);
+            Parm_serial(indice[3], 13, &prms);
+        } else if(*ser_size <= NBBITS_18k)
+        {
+            ACELP_4t64_fx(dn, cn, h2, code, y2, 64, *ser_size, indice);
 
-			Parm_serial(indice[0], 2, &prms);
-			Parm_serial(indice[1], 2, &prms);
-			Parm_serial(indice[2], 2, &prms);
-			Parm_serial(indice[3], 2, &prms);
-			Parm_serial(indice[4], 14, &prms);
-			Parm_serial(indice[5], 14, &prms);
-			Parm_serial(indice[6], 14, &prms);
-			Parm_serial(indice[7], 14, &prms);
-		} else if(*ser_size <= NBBITS_20k)
-		{
-			ACELP_4t64_fx(dn, cn, h2, code, y2, 72, *ser_size, indice);
+            Parm_serial(indice[0], 2, &prms);
+            Parm_serial(indice[1], 2, &prms);
+            Parm_serial(indice[2], 2, &prms);
+            Parm_serial(indice[3], 2, &prms);
+            Parm_serial(indice[4], 14, &prms);
+            Parm_serial(indice[5], 14, &prms);
+            Parm_serial(indice[6], 14, &prms);
+            Parm_serial(indice[7], 14, &prms);
+        } else if(*ser_size <= NBBITS_20k)
+        {
+            ACELP_4t64_fx(dn, cn, h2, code, y2, 72, *ser_size, indice);
 
-			Parm_serial(indice[0], 10, &prms);
-			Parm_serial(indice[1], 10, &prms);
-			Parm_serial(indice[2], 2, &prms);
-			Parm_serial(indice[3], 2, &prms);
-			Parm_serial(indice[4], 10, &prms);
-			Parm_serial(indice[5], 10, &prms);
-			Parm_serial(indice[6], 14, &prms);
-			Parm_serial(indice[7], 14, &prms);
-		} else
-		{
-			ACELP_4t64_fx(dn, cn, h2, code, y2, 88, *ser_size, indice);
+            Parm_serial(indice[0], 10, &prms);
+            Parm_serial(indice[1], 10, &prms);
+            Parm_serial(indice[2], 2, &prms);
+            Parm_serial(indice[3], 2, &prms);
+            Parm_serial(indice[4], 10, &prms);
+            Parm_serial(indice[5], 10, &prms);
+            Parm_serial(indice[6], 14, &prms);
+            Parm_serial(indice[7], 14, &prms);
+        } else
+        {
+            ACELP_4t64_fx(dn, cn, h2, code, y2, 88, *ser_size, indice);
 
-			Parm_serial(indice[0], 11, &prms);
-			Parm_serial(indice[1], 11, &prms);
-			Parm_serial(indice[2], 11, &prms);
-			Parm_serial(indice[3], 11, &prms);
-			Parm_serial(indice[4], 11, &prms);
-			Parm_serial(indice[5], 11, &prms);
-			Parm_serial(indice[6], 11, &prms);
-			Parm_serial(indice[7], 11, &prms);
-		}
-		/*-------------------------------------------------------*
-		 * - Add the fixed-gain pitch contribution to code[].    *
-		 *-------------------------------------------------------*/
-		tmp = 0;
-		Preemph(code, st->tilt_code, L_SUBFR, &tmp);
-		Pit_shrp(code, T0, PIT_SHARP, L_SUBFR);
-		/*----------------------------------------------------------*
-		 *  - Compute the fixed codebook gain                       *
-		 *  - quantize fixed codebook gain                          *
-		 *----------------------------------------------------------*/
-		if(*ser_size <= NBBITS_9k)
-		{
-			index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 6,
-					&gain_pit, &L_gain_code, clip_gain, st->qua_gain);
-			Parm_serial(index, 6, &prms);
-		} else
-		{
-			index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 7,
-					&gain_pit, &L_gain_code, clip_gain, st->qua_gain);
-			Parm_serial(index, 7, &prms);
-		}
-		/* test quantized gain of pitch for pitch clipping algorithm */
-		Gp_clip_test_gain_pit(gain_pit, st->gp_clip);
+            Parm_serial(indice[0], 11, &prms);
+            Parm_serial(indice[1], 11, &prms);
+            Parm_serial(indice[2], 11, &prms);
+            Parm_serial(indice[3], 11, &prms);
+            Parm_serial(indice[4], 11, &prms);
+            Parm_serial(indice[5], 11, &prms);
+            Parm_serial(indice[6], 11, &prms);
+            Parm_serial(indice[7], 11, &prms);
+        }
+        /*-------------------------------------------------------*
+         * - Add the fixed-gain pitch contribution to code[].    *
+         *-------------------------------------------------------*/
+        tmp = 0;
+        Preemph(code, st->tilt_code, L_SUBFR, &tmp);
+        Pit_shrp(code, T0, PIT_SHARP, L_SUBFR);
+        /*----------------------------------------------------------*
+         *  - Compute the fixed codebook gain                       *
+         *  - quantize fixed codebook gain                          *
+         *----------------------------------------------------------*/
+        if(*ser_size <= NBBITS_9k)
+        {
+            index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 6,
+                    &gain_pit, &L_gain_code, clip_gain, st->qua_gain);
+            Parm_serial(index, 6, &prms);
+        } else
+        {
+            index = Q_gain2(xn, y1, Q_new + shift, y2, code, g_coeff, L_SUBFR, 7,
+                    &gain_pit, &L_gain_code, clip_gain, st->qua_gain);
+            Parm_serial(index, 7, &prms);
+        }
+        /* test quantized gain of pitch for pitch clipping algorithm */
+        Gp_clip_test_gain_pit(gain_pit, st->gp_clip);
 
-		L_tmp = L_shl(L_gain_code, Q_new);
-		gain_code = extract_h(L_add(L_tmp, 0x8000));
+        L_tmp = L_shl(L_gain_code, Q_new);
+        gain_code = extract_h(L_add(L_tmp, 0x8000));
 
-		/*----------------------------------------------------------*
-		 * Update parameters for the next subframe.                 *
-		 * - tilt of code: 0.0 (unvoiced) to 0.5 (voiced)           *
-		 *----------------------------------------------------------*/
-		/* find voice factor in Q15 (1=voiced, -1=unvoiced) */
-		Copy(&exc[i_subfr], exc2, L_SUBFR);
+        /*----------------------------------------------------------*
+         * Update parameters for the next subframe.                 *
+         * - tilt of code: 0.0 (unvoiced) to 0.5 (voiced)           *
+         *----------------------------------------------------------*/
+        /* find voice factor in Q15 (1=voiced, -1=unvoiced) */
+        Copy(&exc[i_subfr], exc2, L_SUBFR);
 
 #ifdef ASM_OPT                           /* asm optimization branch */
-		Scale_sig_opt(exc2, L_SUBFR, shift);
+        Scale_sig_opt(exc2, L_SUBFR, shift);
 #else
-		Scale_sig(exc2, L_SUBFR, shift);
+        Scale_sig(exc2, L_SUBFR, shift);
 #endif
-		voice_fac = voice_factor(exc2, shift, gain_pit, code, gain_code, L_SUBFR);
-		/* tilt of code for next subframe: 0.5=voiced, 0=unvoiced */
-		st->tilt_code = ((voice_fac >> 2) + 8192);
-		/*------------------------------------------------------*
-		 * - Update filter's memory "mem_w0" for finding the    *
-		 *   target vector in the next subframe.                *
-		 * - Find the total excitation                          *
-		 * - Find synthesis speech to update mem_syn[].         *
-		 *------------------------------------------------------*/
+        voice_fac = voice_factor(exc2, shift, gain_pit, code, gain_code, L_SUBFR);
+        /* tilt of code for next subframe: 0.5=voiced, 0=unvoiced */
+        st->tilt_code = ((voice_fac >> 2) + 8192);
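+        /* voice_fac is Q15 in [-1, 1]; (voice_fac >> 2) + 8192 maps it
+         * linearly to [0, 16384], i.e. 0 ... 0.5 in Q15, as noted above. */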
+        /*------------------------------------------------------*
+         * - Update filter's memory "mem_w0" for finding the    *
+         *   target vector in the next subframe.                *
+         * - Find the total excitation                          *
+         * - Find synthesis speech to update mem_syn[].         *
+         *------------------------------------------------------*/
 
-		/* y2 in Q9, gain_pit in Q14 */
-		L_tmp = (gain_code * y2[L_SUBFR - 1])<<1;
-		L_tmp = L_shl(L_tmp, (5 + shift));
-		L_tmp = L_negate(L_tmp);
-		L_tmp += (xn[L_SUBFR - 1] * 16384)<<1;
-		L_tmp -= (y1[L_SUBFR - 1] * gain_pit)<<1;
-		L_tmp = L_shl(L_tmp, (1 - shift));
-		st->mem_w0 = extract_h(L_add(L_tmp, 0x8000));
+        /* y2 in Q9, gain_pit in Q14 */
+        L_tmp = L_mult(gain_code, y2[L_SUBFR - 1]);
+        L_tmp = L_shl(L_tmp, (5 + shift));
+        L_tmp = L_negate(L_tmp);
+        L_tmp = L_add(L_tmp, L_mult(xn[L_SUBFR - 1], 16384));
+        L_tmp = L_sub(L_tmp, L_mult(y1[L_SUBFR - 1], gain_pit));
+        L_tmp = L_shl(L_tmp, (1 - shift));
+        st->mem_w0 = extract_h(L_add(L_tmp, 0x8000));
 
-		if (*ser_size >= NBBITS_24k)
-			Copy(&exc[i_subfr], exc2, L_SUBFR);
+        if (*ser_size >= NBBITS_24k)
+            Copy(&exc[i_subfr], exc2, L_SUBFR);
 
-		for (i = 0; i < L_SUBFR; i++)
-		{
-			/* code in Q9, gain_pit in Q14 */
-			L_tmp = (gain_code * code[i])<<1;
-			L_tmp = (L_tmp << 5);
-			L_tmp += (exc[i + i_subfr] * gain_pit)<<1;
-			L_tmp = L_shl2(L_tmp, 1);
-			exc[i + i_subfr] = extract_h(L_add(L_tmp, 0x8000));
-		}
+        for (i = 0; i < L_SUBFR; i++)
+        {
+            Word32 tmp;
+            /* code in Q9, gain_pit in Q14 */
+            L_tmp = L_mult(gain_code, code[i]);
+            L_tmp = L_shl(L_tmp, 5);
+            tmp = L_mult(exc[i + i_subfr], gain_pit); // (exc[i + i_subfr] * gain_pit)<<1
+            L_tmp = L_add(L_tmp, tmp);
+            L_tmp = L_shl2(L_tmp, 1);
+            exc[i + i_subfr] = extract_h(L_add(L_tmp, 0x8000));
+        }
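+        /* Total excitation exc[i] = gain_pit * exc[i] + gain_code * code[i],
+         * built with the saturating L_mult/L_add ops (code in Q9, gain_pit in
+         * Q14, hence the extra shifts) and rounded back to 16 bits. */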
 
-		Syn_filt(p_Aq,&exc[i_subfr], synth, L_SUBFR, st->mem_syn, 1);
+        Syn_filt(p_Aq, &exc[i_subfr], synth, L_SUBFR, st->mem_syn, 1);
 
-		if(*ser_size >= NBBITS_24k)
-		{
-			/*------------------------------------------------------------*
-			 * phase dispersion to enhance noise in low bit rate          *
-			 *------------------------------------------------------------*/
-			/* L_gain_code in Q16 */
-			VO_L_Extract(L_gain_code, &gain_code, &gain_code_lo);
+        if(*ser_size >= NBBITS_24k)
+        {
+            /*------------------------------------------------------------*
+             * phase dispersion to enhance noise in low bit rate          *
+             *------------------------------------------------------------*/
+            /* L_gain_code in Q16 */
+            VO_L_Extract(L_gain_code, &gain_code, &gain_code_lo);
 
-			/*------------------------------------------------------------*
-			 * noise enhancer                                             *
-			 * ~~~~~~~~~~~~~~                                             *
-			 * - Enhance excitation on noise. (modify gain of code)       *
-			 *   If signal is noisy and LPC filter is stable, move gain   *
-			 *   of code 1.5 dB toward gain of code threshold.            *
-			 *   This decrease by 3 dB noise energy variation.            *
-			 *------------------------------------------------------------*/
-			tmp = (16384 - (voice_fac >> 1));        /* 1=unvoiced, 0=voiced */
-			fac = vo_mult(stab_fac, tmp);
-			L_tmp = L_gain_code;
-			if(L_tmp < st->L_gc_thres)
-			{
-				L_tmp = vo_L_add(L_tmp, Mpy_32_16(gain_code, gain_code_lo, 6226));
-				if(L_tmp > st->L_gc_thres)
-				{
-					L_tmp = st->L_gc_thres;
-				}
-			} else
-			{
-				L_tmp = Mpy_32_16(gain_code, gain_code_lo, 27536);
-				if(L_tmp < st->L_gc_thres)
-				{
-					L_tmp = st->L_gc_thres;
-				}
-			}
-			st->L_gc_thres = L_tmp;
+            /*------------------------------------------------------------*
+             * noise enhancer                                             *
+             * ~~~~~~~~~~~~~~                                             *
+             * - Enhance excitation on noise. (modify gain of code)       *
+             *   If signal is noisy and LPC filter is stable, move gain   *
+             *   of code 1.5 dB toward gain of code threshold.            *
+             *   This decreases noise energy variation by 3 dB.           *
+             *------------------------------------------------------------*/
+            tmp = (16384 - (voice_fac >> 1));        /* 1=unvoiced, 0=voiced */
+            fac = vo_mult(stab_fac, tmp);
+            L_tmp = L_gain_code;
+            if(L_tmp < st->L_gc_thres)
+            {
+                L_tmp = vo_L_add(L_tmp, Mpy_32_16(gain_code, gain_code_lo, 6226));
+                if(L_tmp > st->L_gc_thres)
+                {
+                    L_tmp = st->L_gc_thres;
+                }
+            } else
+            {
+                L_tmp = Mpy_32_16(gain_code, gain_code_lo, 27536);
+                if(L_tmp < st->L_gc_thres)
+                {
+                    L_tmp = st->L_gc_thres;
+                }
+            }
+            st->L_gc_thres = L_tmp;
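+            /* The Q15 factors 6226 and 27536 step the code gain about 1.5 dB
+             * up or down toward L_gc_thres (10^(1.5/20) ~ 1.19 going up,
+             * 27536/32768 ~ 0.84 ~ 10^(-1.5/20) coming down). */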
 
-			L_gain_code = Mpy_32_16(gain_code, gain_code_lo, (32767 - fac));
-			VO_L_Extract(L_tmp, &gain_code, &gain_code_lo);
-			L_gain_code = vo_L_add(L_gain_code, Mpy_32_16(gain_code, gain_code_lo, fac));
+            L_gain_code = Mpy_32_16(gain_code, gain_code_lo, (32767 - fac));
+            VO_L_Extract(L_tmp, &gain_code, &gain_code_lo);
+            L_gain_code = vo_L_add(L_gain_code, Mpy_32_16(gain_code, gain_code_lo, fac));
 
-			/*------------------------------------------------------------*
-			 * pitch enhancer                                             *
-			 * ~~~~~~~~~~~~~~                                             *
-			 * - Enhance excitation on voice. (HP filtering of code)      *
-			 *   On voiced signal, filtering of code by a smooth fir HP   *
-			 *   filter to decrease energy of code in low frequency.      *
-			 *------------------------------------------------------------*/
+            /*------------------------------------------------------------*
+             * pitch enhancer                                             *
+             * ~~~~~~~~~~~~~~                                             *
+             * - Enhance excitation on voice. (HP filtering of code)      *
+             *   On voiced signals, the code is filtered by a smooth FIR  *
+             *   HP filter to decrease its low-frequency energy.          *
+             *------------------------------------------------------------*/
 
-			tmp = ((voice_fac >> 3) + 4096); /* 0.25=voiced, 0=unvoiced */
+            tmp = ((voice_fac >> 3) + 4096); /* 0.25=voiced, 0=unvoiced */
 
-			L_tmp = L_deposit_h(code[0]);
-			L_tmp -= (code[1] * tmp)<<1;
-			code2[0] = vo_round(L_tmp);
+            L_tmp = L_deposit_h(code[0]);
+            L_tmp -= (code[1] * tmp)<<1;
+            code2[0] = vo_round(L_tmp);
 
-			for (i = 1; i < L_SUBFR - 1; i++)
-			{
-				L_tmp = L_deposit_h(code[i]);
-				L_tmp -= (code[i + 1] * tmp)<<1;
-				L_tmp -= (code[i - 1] * tmp)<<1;
-				code2[i] = vo_round(L_tmp);
-			}
+            for (i = 1; i < L_SUBFR - 1; i++)
+            {
+                L_tmp = L_deposit_h(code[i]);
+                L_tmp -= (code[i + 1] * tmp)<<1;
+                L_tmp -= (code[i - 1] * tmp)<<1;
+                code2[i] = vo_round(L_tmp);
+            }
 
-			L_tmp = L_deposit_h(code[L_SUBFR - 1]);
-			L_tmp -= (code[L_SUBFR - 2] * tmp)<<1;
-			code2[L_SUBFR - 1] = vo_round(L_tmp);
+            L_tmp = L_deposit_h(code[L_SUBFR - 1]);
+            L_tmp -= (code[L_SUBFR - 2] * tmp)<<1;
+            code2[L_SUBFR - 1] = vo_round(L_tmp);
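+            /* code2[] = code[] through the FIR {-tmp, 1, -tmp}: a gentle HP
+             * whose strength tmp grows to 0.25 on voiced frames, per the
+             * pitch-enhancer comment above. */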
 
-			/* build excitation */
-			gain_code = vo_round(L_shl(L_gain_code, Q_new));
+            /* build excitation */
+            gain_code = vo_round(L_shl(L_gain_code, Q_new));
 
-			for (i = 0; i < L_SUBFR; i++)
-			{
-				L_tmp = (code2[i] * gain_code)<<1;
-				L_tmp = (L_tmp << 5);
-				L_tmp += (exc2[i] * gain_pit)<<1;
-				L_tmp = (L_tmp << 1);
-				exc2[i] = vo_round(L_tmp);
-			}
+            for (i = 0; i < L_SUBFR; i++)
+            {
+                L_tmp = L_mult(code2[i], gain_code);
+                L_tmp = L_shl(L_tmp, 5);
+                L_tmp = L_add(L_tmp, L_mult(exc2[i], gain_pit));
+                L_tmp = L_shl(L_tmp, 1);
+                exc2[i] = vo_round(L_tmp);
+            }
 
-			corr_gain = synthesis(p_Aq, exc2, Q_new, &speech16k[i_subfr * 5 / 4], st);
-			Parm_serial(corr_gain, 4, &prms);
-		}
-		p_A += (M + 1);
-		p_Aq += (M + 1);
-	}                                      /* end of subframe loop */
+            corr_gain = synthesis(p_Aq, exc2, Q_new, &speech16k[i_subfr * 5 / 4], st);
+            Parm_serial(corr_gain, 4, &prms);
+        }
+        p_A += (M + 1);
+        p_Aq += (M + 1);
+    }                                      /* end of subframe loop */
 
-	/*--------------------------------------------------*
-	 * Update signal for next frame.                    *
-	 * -> save past of speech[], wsp[] and exc[].       *
-	 *--------------------------------------------------*/
-	Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
-	Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
-	Copy(&old_exc[L_FRAME], st->old_exc, PIT_MAX + L_INTERPOL);
-	return;
+    /*--------------------------------------------------*
+     * Update signal for next frame.                    *
+     * -> save past of speech[], wsp[] and exc[].       *
+     *--------------------------------------------------*/
+    Copy(&old_speech[L_FRAME], st->old_speech, L_TOTAL - L_FRAME);
+    Copy(&old_wsp[L_FRAME / OPL_DECIM], st->old_wsp, PIT_MAX / OPL_DECIM);
+    Copy(&old_exc[L_FRAME], st->old_exc, PIT_MAX + L_INTERPOL);
+    return;
 }
 
 /*-----------------------------------------------------*
@@ -1329,225 +1333,225 @@
 *-----------------------------------------------------*/
 
 static Word16 synthesis(
-		Word16 Aq[],                          /* A(z)  : quantized Az               */
-		Word16 exc[],                         /* (i)   : excitation at 12kHz        */
-		Word16 Q_new,                         /* (i)   : scaling performed on exc   */
-		Word16 synth16k[],                    /* (o)   : 16kHz synthesis signal     */
-		Coder_State * st                      /* (i/o) : State structure            */
-		)
+        Word16 Aq[],                          /* A(z)  : quantized Az               */
+        Word16 exc[],                         /* (i)   : excitation at 12kHz        */
+        Word16 Q_new,                         /* (i)   : scaling performed on exc   */
+        Word16 synth16k[],                    /* (o)   : 16kHz synthesis signal     */
+        Coder_State * st                      /* (i/o) : State structure            */
+        )
 {
-	Word16 fac, tmp, exp;
-	Word16 ener, exp_ener;
-	Word32 L_tmp, i;
+    Word16 fac, tmp, exp;
+    Word16 ener, exp_ener;
+    Word32 L_tmp, i;
 
-	Word16 synth_hi[M + L_SUBFR], synth_lo[M + L_SUBFR];
-	Word16 synth[L_SUBFR];
-	Word16 HF[L_SUBFR16k];                 /* High Frequency vector      */
-	Word16 Ap[M + 1];
+    Word16 synth_hi[M + L_SUBFR], synth_lo[M + L_SUBFR];
+    Word16 synth[L_SUBFR];
+    Word16 HF[L_SUBFR16k];                 /* High Frequency vector      */
+    Word16 Ap[M + 1];
 
-	Word16 HF_SP[L_SUBFR16k];              /* High Frequency vector (from original signal) */
+    Word16 HF_SP[L_SUBFR16k];              /* High Frequency vector (from original signal) */
 
-	Word16 HP_est_gain, HP_calc_gain, HP_corr_gain;
-	Word16 dist_min, dist;
-	Word16 HP_gain_ind = 0;
-	Word16 gain1, gain2;
-	Word16 weight1, weight2;
+    Word16 HP_est_gain, HP_calc_gain, HP_corr_gain;
+    Word16 dist_min, dist;
+    Word16 HP_gain_ind = 0;
+    Word16 gain1, gain2;
+    Word16 weight1, weight2;
 
-	/*------------------------------------------------------------*
-	 * speech synthesis                                           *
-	 * ~~~~~~~~~~~~~~~~                                           *
-	 * - Find synthesis speech corresponding to exc2[].           *
-	 * - Perform fixed deemphasis and hp 50hz filtering.          *
-	 * - Oversampling from 12.8kHz to 16kHz.                      *
-	 *------------------------------------------------------------*/
-	Copy(st->mem_syn_hi, synth_hi, M);
-	Copy(st->mem_syn_lo, synth_lo, M);
+    /*------------------------------------------------------------*
+     * speech synthesis                                           *
+     * ~~~~~~~~~~~~~~~~                                           *
+     * - Find synthesis speech corresponding to exc2[].           *
+     * - Perform fixed deemphasis and hp 50hz filtering.          *
+     * - Oversampling from 12.8kHz to 16kHz.                      *
+     *------------------------------------------------------------*/
+    Copy(st->mem_syn_hi, synth_hi, M);
+    Copy(st->mem_syn_lo, synth_lo, M);
 
 #ifdef ASM_OPT                 /* asm optimization branch */
-	Syn_filt_32_asm(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
+    Syn_filt_32_asm(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
 #else
-	Syn_filt_32(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
+    Syn_filt_32(Aq, M, exc, Q_new, synth_hi + M, synth_lo + M, L_SUBFR);
 #endif
 
-	Copy(synth_hi + L_SUBFR, st->mem_syn_hi, M);
-	Copy(synth_lo + L_SUBFR, st->mem_syn_lo, M);
+    Copy(synth_hi + L_SUBFR, st->mem_syn_hi, M);
+    Copy(synth_lo + L_SUBFR, st->mem_syn_lo, M);
 
 #ifdef ASM_OPT                 /* asm optimization branch */
-	Deemph_32_asm(synth_hi + M, synth_lo + M, synth, &(st->mem_deemph));
+    Deemph_32_asm(synth_hi + M, synth_lo + M, synth, &(st->mem_deemph));
 #else
-	Deemph_32(synth_hi + M, synth_lo + M, synth, PREEMPH_FAC, L_SUBFR, &(st->mem_deemph));
+    Deemph_32(synth_hi + M, synth_lo + M, synth, PREEMPH_FAC, L_SUBFR, &(st->mem_deemph));
 #endif
 
-	HP50_12k8(synth, L_SUBFR, st->mem_sig_out);
+    HP50_12k8(synth, L_SUBFR, st->mem_sig_out);
 
-	/* Original speech signal as reference for high band gain quantisation */
-	for (i = 0; i < L_SUBFR16k; i++)
-	{
-		HF_SP[i] = synth16k[i];
-	}
+    /* Original speech signal as reference for high band gain quantisation */
+    for (i = 0; i < L_SUBFR16k; i++)
+    {
+        HF_SP[i] = synth16k[i];
+    }
 
-	/*------------------------------------------------------*
-	 * HF noise synthesis                                   *
-	 * ~~~~~~~~~~~~~~~~~~                                   *
-	 * - Generate HF noise between 5.5 and 7.5 kHz.         *
-	 * - Set energy of noise according to synthesis tilt.   *
-	 *     tilt > 0.8 ==> - 14 dB (voiced)                  *
-	 *     tilt   0.5 ==> - 6 dB  (voiced or noise)         *
-	 *     tilt < 0.0 ==>   0 dB  (noise)                   *
-	 *------------------------------------------------------*/
-	/* generate white noise vector */
-	for (i = 0; i < L_SUBFR16k; i++)
-	{
-		HF[i] = Random(&(st->seed2))>>3;
-	}
-	/* energy of excitation */
+    /*------------------------------------------------------*
+     * HF noise synthesis                                   *
+     * ~~~~~~~~~~~~~~~~~~                                   *
+     * - Generate HF noise between 5.5 and 7.5 kHz.         *
+     * - Set energy of noise according to synthesis tilt.   *
+     *     tilt > 0.8 ==> - 14 dB (voiced)                  *
+     *     tilt   0.5 ==> - 6 dB  (voiced or noise)         *
+     *     tilt < 0.0 ==>   0 dB  (noise)                   *
+     *------------------------------------------------------*/
+    /* generate white noise vector */
+    for (i = 0; i < L_SUBFR16k; i++)
+    {
+        HF[i] = Random(&(st->seed2))>>3;
+    }
+    /* energy of excitation */
 #ifdef ASM_OPT                    /* asm optimization branch */
-	Scale_sig_opt(exc, L_SUBFR, -3);
-	Q_new = Q_new - 3;
-	ener = extract_h(Dot_product12_asm(exc, exc, L_SUBFR, &exp_ener));
+    Scale_sig_opt(exc, L_SUBFR, -3);
+    Q_new = Q_new - 3;
+    ener = extract_h(Dot_product12_asm(exc, exc, L_SUBFR, &exp_ener));
 #else
-	Scale_sig(exc, L_SUBFR, -3);
-	Q_new = Q_new - 3;
-	ener = extract_h(Dot_product12(exc, exc, L_SUBFR, &exp_ener));
+    Scale_sig(exc, L_SUBFR, -3);
+    Q_new = Q_new - 3;
+    ener = extract_h(Dot_product12(exc, exc, L_SUBFR, &exp_ener));
 #endif
 
-	exp_ener = exp_ener - (Q_new + Q_new);
-	/* set energy of white noise to energy of excitation */
+    exp_ener = exp_ener - (Q_new + Q_new);
+    /* set energy of white noise to energy of excitation */
 #ifdef ASM_OPT              /* asm optimization branch */
-	tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
+    tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
 #else
-	tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
+    tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
 #endif
 
-	if(tmp > ener)
-	{
-		tmp = (tmp >> 1);                 /* Be sure tmp < ener */
-		exp = (exp + 1);
-	}
-	L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
-	exp = (exp - exp_ener);
-	Isqrt_n(&L_tmp, &exp);
-	L_tmp = L_shl(L_tmp, (exp + 1));       /* L_tmp x 2, L_tmp in Q31 */
-	tmp = extract_h(L_tmp);                /* tmp = 2 x sqrt(ener_exc/ener_hf) */
+    if(tmp > ener)
+    {
+        tmp = (tmp >> 1);                 /* Be sure tmp < ener */
+        exp = (exp + 1);
+    }
+    L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
+    exp = (exp - exp_ener);
+    Isqrt_n(&L_tmp, &exp);
+    L_tmp = L_shl(L_tmp, (exp + 1));       /* L_tmp x 2, L_tmp in Q31 */
+    tmp = extract_h(L_tmp);                /* tmp = 2 x sqrt(ener_exc/ener_hf) */
 
-	for (i = 0; i < L_SUBFR16k; i++)
-	{
-		HF[i] = vo_mult(HF[i], tmp);
-	}
+    for (i = 0; i < L_SUBFR16k; i++)
+    {
+        HF[i] = vo_mult(HF[i], tmp);
+    }
 
-	/* find tilt of synthesis speech (tilt: 1=voiced, -1=unvoiced) */
-	HP400_12k8(synth, L_SUBFR, st->mem_hp400);
+    /* find tilt of synthesis speech (tilt: 1=voiced, -1=unvoiced) */
+    HP400_12k8(synth, L_SUBFR, st->mem_hp400);
 
-	L_tmp = 1L;
-	for (i = 0; i < L_SUBFR; i++)
-		L_tmp += (synth[i] * synth[i])<<1;
+    L_tmp = 1L;
+    for (i = 0; i < L_SUBFR; i++)
+        L_tmp += (synth[i] * synth[i])<<1;
 
-	exp = norm_l(L_tmp);
-	ener = extract_h(L_tmp << exp);   /* ener = r[0] */
+    exp = norm_l(L_tmp);
+    ener = extract_h(L_tmp << exp);   /* ener = r[0] */
 
-	L_tmp = 1L;
-	for (i = 1; i < L_SUBFR; i++)
-		L_tmp +=(synth[i] * synth[i - 1])<<1;
+    L_tmp = 1L;
+    for (i = 1; i < L_SUBFR; i++)
+        L_tmp +=(synth[i] * synth[i - 1])<<1;
 
-	tmp = extract_h(L_tmp << exp);    /* tmp = r[1] */
+    tmp = extract_h(L_tmp << exp);    /* tmp = r[1] */
 
-	if (tmp > 0)
-	{
-		fac = div_s(tmp, ener);
-	} else
-	{
-		fac = 0;
-	}
+    if (tmp > 0)
+    {
+        fac = div_s(tmp, ener);
+    } else
+    {
+        fac = 0;
+    }
 
-	/* modify energy of white noise according to synthesis tilt */
-	gain1 = 32767 - fac;
-	gain2 = vo_mult(gain1, 20480);
-	gain2 = shl(gain2, 1);
+    /* modify energy of white noise according to synthesis tilt */
+    gain1 = 32767 - fac;
+    gain2 = vo_mult(gain1, 20480);
+    gain2 = shl(gain2, 1);
 
-	if (st->vad_hist > 0)
-	{
-		weight1 = 0;
-		weight2 = 32767;
-	} else
-	{
-		weight1 = 32767;
-		weight2 = 0;
-	}
-	tmp = vo_mult(weight1, gain1);
-	tmp = add1(tmp, vo_mult(weight2, gain2));
+    if (st->vad_hist > 0)
+    {
+        weight1 = 0;
+        weight2 = 32767;
+    } else
+    {
+        weight1 = 32767;
+        weight2 = 0;
+    }
+    tmp = vo_mult(weight1, gain1);
+    tmp = add1(tmp, vo_mult(weight2, gain2));
 
-	if (tmp != 0)
-	{
-		tmp = (tmp + 1);
-	}
-	HP_est_gain = tmp;
+    if (tmp != 0)
+    {
+        tmp = (tmp + 1);
+    }
+    HP_est_gain = tmp;
 
-	if(HP_est_gain < 3277)
-	{
-		HP_est_gain = 3277;                /* 0.1 in Q15 */
-	}
-	/* synthesis of noise: 4.8kHz..5.6kHz --> 6kHz..7kHz */
-	Weight_a(Aq, Ap, 19661, M);            /* fac=0.6 */
+    if(HP_est_gain < 3277)
+    {
+        HP_est_gain = 3277;                /* 0.1 in Q15 */
+    }
+    /* synthesis of noise: 4.8kHz..5.6kHz --> 6kHz..7kHz */
+    Weight_a(Aq, Ap, 19661, M);            /* fac=0.6 */
 
 #ifdef ASM_OPT                /* asm optimization branch */
-	Syn_filt_asm(Ap, HF, HF, st->mem_syn_hf);
-	/* noise High Pass filtering (1ms of delay) */
-	Filt_6k_7k_asm(HF, L_SUBFR16k, st->mem_hf);
-	/* filtering of the original signal */
-	Filt_6k_7k_asm(HF_SP, L_SUBFR16k, st->mem_hf2);
+    Syn_filt_asm(Ap, HF, HF, st->mem_syn_hf);
+    /* noise High Pass filtering (1ms of delay) */
+    Filt_6k_7k_asm(HF, L_SUBFR16k, st->mem_hf);
+    /* filtering of the original signal */
+    Filt_6k_7k_asm(HF_SP, L_SUBFR16k, st->mem_hf2);
 
-	/* check the gain difference */
-	Scale_sig_opt(HF_SP, L_SUBFR16k, -1);
-	ener = extract_h(Dot_product12_asm(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
-	/* set energy of white noise to energy of excitation */
-	tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
+    /* check the gain difference */
+    Scale_sig_opt(HF_SP, L_SUBFR16k, -1);
+    ener = extract_h(Dot_product12_asm(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
+    /* set energy of white noise to energy of excitation */
+    tmp = extract_h(Dot_product12_asm(HF, HF, L_SUBFR16k, &exp));
 #else
-	Syn_filt(Ap, HF, HF, L_SUBFR16k, st->mem_syn_hf, 1);
-	/* noise High Pass filtering (1ms of delay) */
-	Filt_6k_7k(HF, L_SUBFR16k, st->mem_hf);
-	/* filtering of the original signal */
-	Filt_6k_7k(HF_SP, L_SUBFR16k, st->mem_hf2);
-	/* check the gain difference */
-	Scale_sig(HF_SP, L_SUBFR16k, -1);
-	ener = extract_h(Dot_product12(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
-	/* set energy of white noise to energy of excitation */
-	tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
+    Syn_filt(Ap, HF, HF, L_SUBFR16k, st->mem_syn_hf, 1);
+    /* noise High Pass filtering (1ms of delay) */
+    Filt_6k_7k(HF, L_SUBFR16k, st->mem_hf);
+    /* filtering of the original signal */
+    Filt_6k_7k(HF_SP, L_SUBFR16k, st->mem_hf2);
+    /* check the gain difference */
+    Scale_sig(HF_SP, L_SUBFR16k, -1);
+    ener = extract_h(Dot_product12(HF_SP, HF_SP, L_SUBFR16k, &exp_ener));
+    /* set energy of white noise to energy of excitation */
+    tmp = extract_h(Dot_product12(HF, HF, L_SUBFR16k, &exp));
 #endif
 
-	if (tmp > ener)
-	{
-		tmp = (tmp >> 1);                 /* Be sure tmp < ener */
-		exp = (exp + 1);
-	}
-	L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
-	exp = vo_sub(exp, exp_ener);
-	Isqrt_n(&L_tmp, &exp);
-	L_tmp = L_shl(L_tmp, exp);             /* L_tmp, L_tmp in Q31 */
-	HP_calc_gain = extract_h(L_tmp);       /* tmp = sqrt(ener_input/ener_hf) */
+    if (tmp > ener)
+    {
+        tmp = (tmp >> 1);                 /* Be sure tmp < ener */
+        exp = (exp + 1);
+    }
+    L_tmp = L_deposit_h(div_s(tmp, ener)); /* result is normalized */
+    exp = vo_sub(exp, exp_ener);
+    Isqrt_n(&L_tmp, &exp);
+    L_tmp = L_shl(L_tmp, exp);             /* L_tmp, L_tmp in Q31 */
+    HP_calc_gain = extract_h(L_tmp);       /* tmp = sqrt(ener_input/ener_hf) */
 
-	/* st->gain_alpha *= st->dtx_encSt->dtxHangoverCount/7 */
-	L_tmp = (vo_L_mult(st->dtx_encSt->dtxHangoverCount, 4681) << 15);
-	st->gain_alpha = vo_mult(st->gain_alpha, extract_h(L_tmp));
+    /* st->gain_alpha *= st->dtx_encSt->dtxHangoverCount/7 */
+    L_tmp = (vo_L_mult(st->dtx_encSt->dtxHangoverCount, 4681) << 15);
+    st->gain_alpha = vo_mult(st->gain_alpha, extract_h(L_tmp));
 
-	if(st->dtx_encSt->dtxHangoverCount > 6)
-		st->gain_alpha = 32767;
-	HP_est_gain = HP_est_gain >> 1;     /* From Q15 to Q14 */
-	HP_corr_gain = add1(vo_mult(HP_calc_gain, st->gain_alpha), vo_mult((32767 - st->gain_alpha), HP_est_gain));
+    if(st->dtx_encSt->dtxHangoverCount > 6)
+        st->gain_alpha = 32767;
+    HP_est_gain = HP_est_gain >> 1;     /* From Q15 to Q14 */
+    HP_corr_gain = add1(vo_mult(HP_calc_gain, st->gain_alpha), vo_mult((32767 - st->gain_alpha), HP_est_gain));
 
-	/* Quantise the correction gain */
-	dist_min = 32767;
-	for (i = 0; i < 16; i++)
-	{
-		dist = vo_mult((HP_corr_gain - HP_gain[i]), (HP_corr_gain - HP_gain[i]));
-		if (dist_min > dist)
-		{
-			dist_min = dist;
-			HP_gain_ind = i;
-		}
-	}
-	HP_corr_gain = HP_gain[HP_gain_ind];
-	/* return the quantised gain index when using the highest mode, otherwise zero */
-	return (HP_gain_ind);
+    /* Quantise the correction gain */
+    dist_min = 32767;
+    for (i = 0; i < 16; i++)
+    {
+        dist = vo_mult((HP_corr_gain - HP_gain[i]), (HP_corr_gain - HP_gain[i]));
+        if (dist_min > dist)
+        {
+            dist_min = dist;
+            HP_gain_ind = i;
+        }
+    }
+    HP_corr_gain = HP_gain[HP_gain_ind];
+    /* return the quantised gain index when using the highest mode, otherwise zero */
+    return (HP_gain_ind);
 }
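
The fixed-point sequence above is dense, so here is a floating-point reference for its two key steps (illustrative only — the codec itself works entirely in Word16/Word32 with explicit Q-format bookkeeping): scale the white noise so its energy matches the excitation energy, then estimate the spectral tilt as the first normalized autocorrelation lag of the synthesis signal.

    #include <math.h>

    /* Sketch of the energy match: g = sqrt(ener_exc / ener_hf).
     * The fixed-point code above carries an extra factor of 2 to
     * compensate for its earlier down-scaling of exc and HF. */
    static void match_noise_energy(float *hf, int n_hf,
                                   const float *exc, int n_exc)
    {
        float e_exc = 0.0f, e_hf = 1e-9f;
        int i;
        for (i = 0; i < n_exc; i++) e_exc += exc[i] * exc[i];
        for (i = 0; i < n_hf;  i++) e_hf  += hf[i]  * hf[i];
        {
            float g = sqrtf(e_exc / e_hf);
            for (i = 0; i < n_hf; i++) hf[i] *= g;
        }
    }

    /* Sketch of the tilt estimate fac = r[1]/r[0]: close to 1 for
     * voiced speech, zero or negative for noise-like signals. */
    static float synthesis_tilt(const float *synth, int n)
    {
        float r0 = 1e-9f, r1 = 0.0f;
        int i;
        for (i = 0; i < n; i++) r0 += synth[i] * synth[i];
        for (i = 1; i < n; i++) r1 += synth[i] * synth[i - 1];
        return (r1 > 0.0f) ? r1 / r0 : 0.0f;
    }
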
 
 /*************************************************
@@ -1558,33 +1562,33 @@
 
 int AMR_Enc_Encode(HAMRENC hCodec)
 {
-	Word32 i;
-	Coder_State *gData = (Coder_State*)hCodec;
-	Word16 *signal;
-	Word16 packed_size = 0;
-	Word16 prms[NB_BITS_MAX];
-	Word16 coding_mode = 0, nb_bits, allow_dtx, mode, reset_flag;
-	mode = gData->mode;
-	coding_mode = gData->mode;
-	nb_bits = nb_of_bits[mode];
-	signal = (Word16 *)gData->inputStream;
-	allow_dtx = gData->allow_dtx;
+    Word32 i;
+    Coder_State *gData = (Coder_State*)hCodec;
+    Word16 *signal;
+    Word16 packed_size = 0;
+    Word16 prms[NB_BITS_MAX];
+    Word16 coding_mode = 0, nb_bits, allow_dtx, mode, reset_flag;
+    mode = gData->mode;
+    coding_mode = gData->mode;
+    nb_bits = nb_of_bits[mode];
+    signal = (Word16 *)gData->inputStream;
+    allow_dtx = gData->allow_dtx;
 
-	/* check for homing frame */
-	reset_flag = encoder_homing_frame_test(signal);
+    /* check for homing frame */
+    reset_flag = encoder_homing_frame_test(signal);
 
-	for (i = 0; i < L_FRAME16k; i++)   /* Delete the 2 LSBs (14-bit input) */
-	{
-		*(signal + i) = (Word16) (*(signal + i) & 0xfffC);
-	}
+    for (i = 0; i < L_FRAME16k; i++)   /* Delete the 2 LSBs (14-bit input) */
+    {
+        *(signal + i) = (Word16) (*(signal + i) & 0xfffC);
+    }
 
-	coder(&coding_mode, signal, prms, &nb_bits, gData, allow_dtx);
-	packed_size = PackBits(prms, coding_mode, mode, gData);
-	if (reset_flag != 0)
-	{
-		Reset_encoder(gData, 1);
-	}
-	return packed_size;
+    coder(&coding_mode, signal, prms, &nb_bits, gData, allow_dtx);
+    packed_size = PackBits(prms, coding_mode, mode, gData);
+    if (reset_flag != 0)
+    {
+        Reset_encoder(gData, 1);
+    }
+    return packed_size;
 }
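
For reference, the masking step in the loop above is a pure bit operation; a minimal, self-contained illustration of the "(14-bit input)" comment:

    #include <assert.h>

    /* Clearing the two LSBs with 0xFFFC reduces each 16-bit sample
     * to 14 significant bits, matching the comment above. */
    static void mask_to_14bit_example(void)
    {
        short s = 0x1237;          /* binary ...0011 0111          */
        s = (short)(s & 0xFFFC);   /* binary ...0011 0100 = 0x1234 */
        assert(s == 0x1234);
    }
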
 
 /***************************************************************************
@@ -1594,94 +1598,94 @@
 ***************************************************************************/
 
 VO_U32 VO_API voAMRWB_Init(VO_HANDLE * phCodec,                   /* o: the audio codec handle */
-						   VO_AUDIO_CODINGTYPE vType,             /* i: Codec Type ID */
-						   VO_CODEC_INIT_USERDATA * pUserData     /* i: init Parameters */
-						   )
+                           VO_AUDIO_CODINGTYPE vType,             /* i: Codec Type ID */
+                           VO_CODEC_INIT_USERDATA * pUserData     /* i: init Parameters */
+                           )
 {
-	Coder_State *st;
-	FrameStream *stream;
+    Coder_State *st;
+    FrameStream *stream;
 #ifdef USE_DEAULT_MEM
-	VO_MEM_OPERATOR voMemoprator;
+    VO_MEM_OPERATOR voMemoprator;
 #endif
-	VO_MEM_OPERATOR *pMemOP;
+    VO_MEM_OPERATOR *pMemOP;
         UNUSED(vType);
 
-	int interMem = 0;
+    int interMem = 0;
 
-	if(pUserData == NULL || pUserData->memflag != VO_IMF_USERMEMOPERATOR || pUserData->memData == NULL )
-	{
+    if(pUserData == NULL || pUserData->memflag != VO_IMF_USERMEMOPERATOR || pUserData->memData == NULL )
+    {
 #ifdef USE_DEAULT_MEM
-		voMemoprator.Alloc = cmnMemAlloc;
-		voMemoprator.Copy = cmnMemCopy;
-		voMemoprator.Free = cmnMemFree;
-		voMemoprator.Set = cmnMemSet;
-		voMemoprator.Check = cmnMemCheck;
-		interMem = 1;
-		pMemOP = &voMemoprator;
+        voMemoprator.Alloc = cmnMemAlloc;
+        voMemoprator.Copy = cmnMemCopy;
+        voMemoprator.Free = cmnMemFree;
+        voMemoprator.Set = cmnMemSet;
+        voMemoprator.Check = cmnMemCheck;
+        interMem = 1;
+        pMemOP = &voMemoprator;
 #else
-		*phCodec = NULL;
-		return VO_ERR_INVALID_ARG;
+        *phCodec = NULL;
+        return VO_ERR_INVALID_ARG;
 #endif
-	}
-	else
-	{
-		pMemOP = (VO_MEM_OPERATOR *)pUserData->memData;
-	}
-	/*-------------------------------------------------------------------------*
-	 * Memory allocation for coder state.                                      *
-	 *-------------------------------------------------------------------------*/
-	if ((st = (Coder_State *)mem_malloc(pMemOP, sizeof(Coder_State), 32, VO_INDEX_ENC_AMRWB)) == NULL)
-	{
-		return VO_ERR_OUTOF_MEMORY;
-	}
+    }
+    else
+    {
+        pMemOP = (VO_MEM_OPERATOR *)pUserData->memData;
+    }
+    /*-------------------------------------------------------------------------*
+     * Memory allocation for coder state.                                      *
+     *-------------------------------------------------------------------------*/
+    if ((st = (Coder_State *)mem_malloc(pMemOP, sizeof(Coder_State), 32, VO_INDEX_ENC_AMRWB)) == NULL)
+    {
+        return VO_ERR_OUTOF_MEMORY;
+    }
 
-	st->vadSt = NULL;
-	st->dtx_encSt = NULL;
-	st->sid_update_counter = 3;
-	st->sid_handover_debt = 0;
-	st->prev_ft = TX_SPEECH;
-	st->inputStream = NULL;
-	st->inputSize = 0;
+    st->vadSt = NULL;
+    st->dtx_encSt = NULL;
+    st->sid_update_counter = 3;
+    st->sid_handover_debt = 0;
+    st->prev_ft = TX_SPEECH;
+    st->inputStream = NULL;
+    st->inputSize = 0;
 
-	/* Default setting */
-	st->mode = VOAMRWB_MD2385;                        /* bit rate 23.85kbps */
-	st->frameType = VOAMRWB_RFC3267;                  /* frame type: RFC3267 */
-	st->allow_dtx = 0;                                /* disable DTX mode */
+    /* Default setting */
+    st->mode = VOAMRWB_MD2385;                        /* bit rate 23.85kbps */
+    st->frameType = VOAMRWB_RFC3267;                  /* frame type: RFC3267 */
+    st->allow_dtx = 0;                                /* disable DTX mode */
 
-	st->outputStream = NULL;
-	st->outputSize = 0;
+    st->outputStream = NULL;
+    st->outputSize = 0;
 
-	st->stream = (FrameStream *)mem_malloc(pMemOP, sizeof(FrameStream), 32, VO_INDEX_ENC_AMRWB);
-	if(st->stream == NULL)
-		return VO_ERR_OUTOF_MEMORY;
+    st->stream = (FrameStream *)mem_malloc(pMemOP, sizeof(FrameStream), 32, VO_INDEX_ENC_AMRWB);
+    if(st->stream == NULL)
+        return VO_ERR_OUTOF_MEMORY;
 
-	st->stream->frame_ptr = (unsigned char *)mem_malloc(pMemOP, Frame_Maxsize, 32, VO_INDEX_ENC_AMRWB);
-	if(st->stream->frame_ptr == NULL)
-		return  VO_ERR_OUTOF_MEMORY;
+    st->stream->frame_ptr = (unsigned char *)mem_malloc(pMemOP, Frame_Maxsize, 32, VO_INDEX_ENC_AMRWB);
+    if(st->stream->frame_ptr == NULL)
+        return  VO_ERR_OUTOF_MEMORY;
 
-	stream = st->stream;
-	voAWB_InitFrameBuffer(stream);
+    stream = st->stream;
+    voAWB_InitFrameBuffer(stream);
 
-	wb_vad_init(&(st->vadSt), pMemOP);
-	dtx_enc_init(&(st->dtx_encSt), isf_init, pMemOP);
+    wb_vad_init(&(st->vadSt), pMemOP);
+    dtx_enc_init(&(st->dtx_encSt), isf_init, pMemOP);
 
-	Reset_encoder((void *) st, 1);
+    Reset_encoder((void *) st, 1);
 
-	if(interMem)
-	{
-		st->voMemoprator.Alloc = cmnMemAlloc;
-		st->voMemoprator.Copy = cmnMemCopy;
-		st->voMemoprator.Free = cmnMemFree;
-		st->voMemoprator.Set = cmnMemSet;
-		st->voMemoprator.Check = cmnMemCheck;
-		pMemOP = &st->voMemoprator;
-	}
+    if(interMem)
+    {
+        st->voMemoprator.Alloc = cmnMemAlloc;
+        st->voMemoprator.Copy = cmnMemCopy;
+        st->voMemoprator.Free = cmnMemFree;
+        st->voMemoprator.Set = cmnMemSet;
+        st->voMemoprator.Check = cmnMemCheck;
+        pMemOP = &st->voMemoprator;
+    }
 
-	st->pvoMemop = pMemOP;
+    st->pvoMemop = pMemOP;
 
-	*phCodec = (void *) st;
+    *phCodec = (void *) st;
 
-	return VO_ERR_NONE;
+    return VO_ERR_NONE;
 }
 
 /**********************************************************************************
@@ -1691,32 +1695,32 @@
 ***********************************************************************************/
 
 VO_U32 VO_API voAMRWB_SetInputData(
-		VO_HANDLE hCodec,                   /* i/o: The codec handle which was created by Init function */
-		VO_CODECBUFFER * pInput             /*   i: The input buffer parameter  */
-		)
+        VO_HANDLE hCodec,                   /* i/o: The codec handle created by the Init function */
+        VO_CODECBUFFER * pInput             /*   i: The input buffer parameter  */
+        )
 {
-	Coder_State  *gData;
-	FrameStream  *stream;
+    Coder_State  *gData;
+    FrameStream  *stream;
 
-	if(NULL == hCodec)
-	{
-		return VO_ERR_INVALID_ARG;
-	}
+    if(NULL == hCodec)
+    {
+        return VO_ERR_INVALID_ARG;
+    }
 
-	gData = (Coder_State *)hCodec;
-	stream = gData->stream;
+    gData = (Coder_State *)hCodec;
+    stream = gData->stream;
 
-	if(NULL == pInput || NULL == pInput->Buffer)
-	{
-		return VO_ERR_INVALID_ARG;
-	}
+    if(NULL == pInput || NULL == pInput->Buffer)
+    {
+        return VO_ERR_INVALID_ARG;
+    }
 
-	stream->set_ptr    = pInput->Buffer;
-	stream->set_len    = pInput->Length;
-	stream->frame_ptr  = stream->frame_ptr_bk;
-	stream->used_len   = 0;
+    stream->set_ptr    = pInput->Buffer;
+    stream->set_len    = pInput->Length;
+    stream->frame_ptr  = stream->frame_ptr_bk;
+    stream->used_len   = 0;
 
-	return VO_ERR_NONE;
+    return VO_ERR_NONE;
 }
 
 /**************************************************************************************
@@ -1726,52 +1730,52 @@
 ***************************************************************************************/
 
 VO_U32 VO_API voAMRWB_GetOutputData(
-		VO_HANDLE hCodec,                    /* i: The Codec Handle which was created by Init function*/
-		VO_CODECBUFFER * pOutput,            /* o: The output audio data */
-		VO_AUDIO_OUTPUTINFO * pAudioFormat   /* o: The encoder module filled audio format and used the input size*/
-		)
+        VO_HANDLE hCodec,                    /* i: The codec handle created by the Init function */
+        VO_CODECBUFFER * pOutput,            /* o: The output audio data */
+        VO_AUDIO_OUTPUTINFO * pAudioFormat   /* o: Audio format and bytes of input consumed, filled in by the encoder */
+        )
 {
-	Coder_State* gData = (Coder_State*)hCodec;
-	VO_MEM_OPERATOR  *pMemOP;
-	FrameStream  *stream = (FrameStream *)gData->stream;
-	pMemOP = (VO_MEM_OPERATOR  *)gData->pvoMemop;
+    Coder_State* gData = (Coder_State*)hCodec;
+    VO_MEM_OPERATOR  *pMemOP;
+    FrameStream  *stream = (FrameStream *)gData->stream;
+    pMemOP = (VO_MEM_OPERATOR  *)gData->pvoMemop;
 
-	if(stream->framebuffer_len  < Frame_MaxByte)         /* check the work buffer len */
-	{
-		stream->frame_storelen = stream->framebuffer_len;
-		if(stream->frame_storelen)
-		{
-			pMemOP->Copy(VO_INDEX_ENC_AMRWB, stream->frame_ptr_bk , stream->frame_ptr , stream->frame_storelen);
-		}
-		if(stream->set_len > 0)
-		{
-			voAWB_UpdateFrameBuffer(stream, pMemOP);
-		}
-		if(stream->framebuffer_len < Frame_MaxByte)
-		{
-			if(pAudioFormat)
-				pAudioFormat->InputUsed = stream->used_len;
-			return VO_ERR_INPUT_BUFFER_SMALL;
-		}
-	}
+    if(stream->framebuffer_len  < Frame_MaxByte)         /* check the work buffer len */
+    {
+        stream->frame_storelen = stream->framebuffer_len;
+        if(stream->frame_storelen)
+        {
+            pMemOP->Copy(VO_INDEX_ENC_AMRWB, stream->frame_ptr_bk , stream->frame_ptr , stream->frame_storelen);
+        }
+        if(stream->set_len > 0)
+        {
+            voAWB_UpdateFrameBuffer(stream, pMemOP);
+        }
+        if(stream->framebuffer_len < Frame_MaxByte)
+        {
+            if(pAudioFormat)
+                pAudioFormat->InputUsed = stream->used_len;
+            return VO_ERR_INPUT_BUFFER_SMALL;
+        }
+    }
 
-	gData->inputStream = stream->frame_ptr;
-	gData->outputStream = (unsigned short*)pOutput->Buffer;
+    gData->inputStream = stream->frame_ptr;
+    gData->outputStream = (unsigned short*)pOutput->Buffer;
 
-	gData->outputSize = AMR_Enc_Encode(gData);         /* encoder main function */
+    gData->outputSize = AMR_Enc_Encode(gData);         /* encoder main function */
 
-	pOutput->Length = gData->outputSize;               /* get the output buffer length */
-	stream->frame_ptr += 640;                          /* update the work buffer ptr */
-	stream->framebuffer_len  -= 640;
+    pOutput->Length = gData->outputSize;               /* get the output buffer length */
+    stream->frame_ptr += 640;                          /* update the work buffer ptr */
+    stream->framebuffer_len  -= 640;
 
-	if(pAudioFormat)                                   /* return output audio information */
-	{
-		pAudioFormat->Format.Channels = 1;
-		pAudioFormat->Format.SampleRate = 8000;
-		pAudioFormat->Format.SampleBits = 16;
-		pAudioFormat->InputUsed = stream->used_len;
-	}
-	return VO_ERR_NONE;
+    if(pAudioFormat)                                   /* return output audio information */
+    {
+        pAudioFormat->Format.Channels = 1;
+        pAudioFormat->Format.SampleRate = 8000;
+        pAudioFormat->Format.SampleBits = 16;
+        pAudioFormat->InputUsed = stream->used_len;
+    }
+    return VO_ERR_NONE;
 }
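
The hard-coded 640 above is one frame's worth of input; worked numbers (illustrative) and the resulting calling convention:

    /* One AMR-WB frame is 20 ms of 16 kHz, 16-bit mono PCM:
     *     16000 samples/s * 0.020 s        = 320 samples
     *     320 samples     * 2 bytes/sample = 640 bytes
     * Each successful GetOutputData call consumes exactly 640 bytes
     * from the work buffer; VO_ERR_INPUT_BUFFER_SMALL signals that
     * less than a full frame remains buffered and more PCM must be
     * supplied via voAMRWB_SetInputData(). */
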
 
 /*************************************************************************
@@ -1782,50 +1786,50 @@
 
 
 VO_U32 VO_API voAMRWB_SetParam(
-		VO_HANDLE hCodec,   /* i/o: The Codec Handle which was created by Init function */
-		VO_S32 uParamID,    /*   i: The param ID */
-		VO_PTR pData        /*   i: The param value depend on the ID */
-		)
+        VO_HANDLE hCodec,   /* i/o: The codec handle created by the Init function */
+        VO_S32 uParamID,    /*   i: The param ID */
+        VO_PTR pData        /*   i: The param value depends on the ID */
+        )
 {
-	Coder_State* gData = (Coder_State*)hCodec;
-	FrameStream *stream = (FrameStream *)(gData->stream);
-	int *lValue = (int*)pData;
+    Coder_State* gData = (Coder_State*)hCodec;
+    FrameStream *stream = (FrameStream *)(gData->stream);
+    int *lValue = (int*)pData;
 
-	switch(uParamID)
-	{
-		/* setting AMR-WB frame type*/
-		case VO_PID_AMRWB_FRAMETYPE:
-			if(*lValue < VOAMRWB_DEFAULT || *lValue > VOAMRWB_RFC3267)
-				return VO_ERR_WRONG_PARAM_ID;
-			gData->frameType = *lValue;
-			break;
-		/* setting AMR-WB bit rate */
-		case VO_PID_AMRWB_MODE:
-			{
-				if(*lValue < VOAMRWB_MD66 || *lValue > VOAMRWB_MD2385)
-					return VO_ERR_WRONG_PARAM_ID;
-				gData->mode = *lValue;
-			}
-			break;
-		/* enable or disable DTX mode */
-		case VO_PID_AMRWB_DTX:
-			gData->allow_dtx = (Word16)(*lValue);
-			break;
+    switch(uParamID)
+    {
+        /* setting AMR-WB frame type*/
+        case VO_PID_AMRWB_FRAMETYPE:
+            if(*lValue < VOAMRWB_DEFAULT || *lValue > VOAMRWB_RFC3267)
+                return VO_ERR_WRONG_PARAM_ID;
+            gData->frameType = *lValue;
+            break;
+        /* setting AMR-WB bit rate */
+        case VO_PID_AMRWB_MODE:
+            {
+                if(*lValue < VOAMRWB_MD66 || *lValue > VOAMRWB_MD2385)
+                    return VO_ERR_WRONG_PARAM_ID;
+                gData->mode = *lValue;
+            }
+            break;
+        /* enable or disable DTX mode */
+        case VO_PID_AMRWB_DTX:
+            gData->allow_dtx = (Word16)(*lValue);
+            break;
 
-		case VO_PID_COMMON_HEADDATA:
-			break;
+        case VO_PID_COMMON_HEADDATA:
+            break;
         /* flush the work buffer */
-		case VO_PID_COMMON_FLUSH:
-			stream->set_ptr = NULL;
-			stream->frame_storelen = 0;
-			stream->framebuffer_len = 0;
-			stream->set_len = 0;
-			break;
+        case VO_PID_COMMON_FLUSH:
+            stream->set_ptr = NULL;
+            stream->frame_storelen = 0;
+            stream->framebuffer_len = 0;
+            stream->set_len = 0;
+            break;
 
-		default:
-			return VO_ERR_WRONG_PARAM_ID;
-	}
-	return VO_ERR_NONE;
+        default:
+            return VO_ERR_WRONG_PARAM_ID;
+    }
+    return VO_ERR_NONE;
 }
 
 /**************************************************************************
@@ -1835,52 +1839,52 @@
 ***************************************************************************/
 
 VO_U32 VO_API voAMRWB_GetParam(
-		VO_HANDLE hCodec,      /* i: The Codec Handle which was created by Init function */
-		VO_S32 uParamID,       /* i: The param ID */
-		VO_PTR pData           /* o: The param value depend on the ID */
-		)
+        VO_HANDLE hCodec,      /* i: The codec handle created by the Init function */
+        VO_S32 uParamID,       /* i: The param ID */
+        VO_PTR pData           /* o: The param value depends on the ID */
+        )
 {
-	int    temp;
-	Coder_State* gData = (Coder_State*)hCodec;
+    int    temp;
+    Coder_State* gData = (Coder_State*)hCodec;
 
-	if (gData==NULL)
-		return VO_ERR_INVALID_ARG;
-	switch(uParamID)
-	{
-		/* output audio format */
-		case VO_PID_AMRWB_FORMAT:
-			{
-				VO_AUDIO_FORMAT* fmt = (VO_AUDIO_FORMAT*)pData;
-				fmt->Channels   = 1;
-				fmt->SampleRate = 16000;
-				fmt->SampleBits = 16;
-				break;
-			}
+    if (gData==NULL)
+        return VO_ERR_INVALID_ARG;
+    switch(uParamID)
+    {
+        /* output audio format */
+        case VO_PID_AMRWB_FORMAT:
+            {
+                VO_AUDIO_FORMAT* fmt = (VO_AUDIO_FORMAT*)pData;
+                fmt->Channels   = 1;
+                fmt->SampleRate = 16000;
+                fmt->SampleBits = 16;
+                break;
+            }
         /* output audio channel number */
-		case VO_PID_AMRWB_CHANNELS:
-			temp = 1;
-			pData = (void *)(&temp);
-			break;
+        case VO_PID_AMRWB_CHANNELS:
+            temp = 1;
+            *(int *)pData = temp;   /* write through the caller's pointer */
+            break;
         /* output audio sample rate */
-		case VO_PID_AMRWB_SAMPLERATE:
-			temp = 16000;
-			pData = (void *)(&temp);
-			break;
-		/* output audio frame type */
-		case VO_PID_AMRWB_FRAMETYPE:
-			temp = gData->frameType;
-			pData = (void *)(&temp);
-			break;
-		/* output audio bit rate */
-		case VO_PID_AMRWB_MODE:
-			temp = gData->mode;
-			pData = (void *)(&temp);
-			break;
-		default:
-			return VO_ERR_WRONG_PARAM_ID;
-	}
+        case VO_PID_AMRWB_SAMPLERATE:
+            temp = 16000;
+            *(int *)pData = temp;
+            break;
+        /* output audio frame type */
+        case VO_PID_AMRWB_FRAMETYPE:
+            temp = gData->frameType;
+            *(int *)pData = temp;
+            break;
+        /* output audio bit rate */
+        case VO_PID_AMRWB_MODE:
+            temp = gData->mode;
+            *(int *)pData = temp;
+            break;
+        default:
+            return VO_ERR_WRONG_PARAM_ID;
+    }
 
-	return VO_ERR_NONE;
+    return VO_ERR_NONE;
 }
 
 /***********************************************************************************
@@ -1890,32 +1894,32 @@
 *************************************************************************************/
 
 VO_U32 VO_API voAMRWB_Uninit(VO_HANDLE hCodec           /* i/o: Codec handle pointer */
-							 )
+                             )
 {
-	Coder_State* gData = (Coder_State*)hCodec;
-	VO_MEM_OPERATOR *pMemOP;
-	pMemOP = gData->pvoMemop;
+    Coder_State* gData = (Coder_State*)hCodec;
+    VO_MEM_OPERATOR *pMemOP;
+    pMemOP = (gData != NULL) ? gData->pvoMemop : NULL;   /* guard the NULL-handle case checked below */
 
-	if(hCodec)
-	{
-		if(gData->stream)
-		{
-			if(gData->stream->frame_ptr_bk)
-			{
-				mem_free(pMemOP, gData->stream->frame_ptr_bk, VO_INDEX_ENC_AMRWB);
-				gData->stream->frame_ptr_bk = NULL;
-			}
-			mem_free(pMemOP, gData->stream, VO_INDEX_ENC_AMRWB);
-			gData->stream = NULL;
-		}
-		wb_vad_exit(&(((Coder_State *) gData)->vadSt), pMemOP);
-		dtx_enc_exit(&(((Coder_State *) gData)->dtx_encSt), pMemOP);
+    if(hCodec)
+    {
+        if(gData->stream)
+        {
+            if(gData->stream->frame_ptr_bk)
+            {
+                mem_free(pMemOP, gData->stream->frame_ptr_bk, VO_INDEX_ENC_AMRWB);
+                gData->stream->frame_ptr_bk = NULL;
+            }
+            mem_free(pMemOP, gData->stream, VO_INDEX_ENC_AMRWB);
+            gData->stream = NULL;
+        }
+        wb_vad_exit(&(((Coder_State *) gData)->vadSt), pMemOP);
+        dtx_enc_exit(&(((Coder_State *) gData)->dtx_encSt), pMemOP);
 
-		mem_free(pMemOP, hCodec, VO_INDEX_ENC_AMRWB);
-		hCodec = NULL;
-	}
+        mem_free(pMemOP, hCodec, VO_INDEX_ENC_AMRWB);
+        hCodec = NULL;
+    }
 
-	return VO_ERR_NONE;
+    return VO_ERR_NONE;
 }
 
 /********************************************************************************
@@ -1925,19 +1929,19 @@
 ********************************************************************************/
 
 VO_S32 VO_API voGetAMRWBEncAPI(
-							   VO_AUDIO_CODECAPI * pEncHandle      /* i/o: Codec handle pointer */
-							   )
+                               VO_AUDIO_CODECAPI * pEncHandle      /* i/o: API table filled with the encoder entry points */
+                               )
 {
-	if(NULL == pEncHandle)
-		return VO_ERR_INVALID_ARG;
-	pEncHandle->Init = voAMRWB_Init;
-	pEncHandle->SetInputData = voAMRWB_SetInputData;
-	pEncHandle->GetOutputData = voAMRWB_GetOutputData;
-	pEncHandle->SetParam = voAMRWB_SetParam;
-	pEncHandle->GetParam = voAMRWB_GetParam;
-	pEncHandle->Uninit = voAMRWB_Uninit;
+    if(NULL == pEncHandle)
+        return VO_ERR_INVALID_ARG;
+    pEncHandle->Init = voAMRWB_Init;
+    pEncHandle->SetInputData = voAMRWB_SetInputData;
+    pEncHandle->GetOutputData = voAMRWB_GetOutputData;
+    pEncHandle->SetParam = voAMRWB_SetParam;
+    pEncHandle->GetParam = voAMRWB_GetParam;
+    pEncHandle->Uninit = voAMRWB_Uninit;
 
-	return VO_ERR_NONE;
+    return VO_ERR_NONE;
 }
 
 #ifdef __cplusplus
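
Since the table filled in above is the encoder's entire public surface, a minimal end-to-end sketch may help. It is illustrative, not normative: the header names ("voAMRWB.h", "cmnMemory.h"), the VO_AUDIO_CodingAMRWB constant, and the exact VO_CODECBUFFER field types are assumed from the standard voType/voAudio headers, and the buffer sizes are placeholders.

    #include "voAMRWB.h"    /* assumed public header names */
    #include "cmnMemory.h"  /* cmnMemAlloc/Copy/Free/Set/Check */

    /* Encode a buffer of 16 kHz, 16-bit mono PCM at 23.85 kbps.
     * Returns 0 on success, -1 on failure. */
    static int encode_pcm(short *pcm, int pcmBytes,
                          unsigned char *outFrame /* one packed frame */)
    {
        VO_AUDIO_CODECAPI api;
        VO_MEM_OPERATOR memop;
        VO_CODEC_INIT_USERDATA userData;
        VO_CODECBUFFER in, out;
        VO_AUDIO_OUTPUTINFO info;
        VO_HANDLE handle;
        int mode = VOAMRWB_MD2385;

        if (voGetAMRWBEncAPI(&api) != VO_ERR_NONE)
            return -1;

        memop.Alloc = cmnMemAlloc;  memop.Copy = cmnMemCopy;
        memop.Free  = cmnMemFree;   memop.Set  = cmnMemSet;
        memop.Check = cmnMemCheck;
        userData.memflag = VO_IMF_USERMEMOPERATOR;
        userData.memData = (VO_PTR)&memop;

        /* vType is marked UNUSED inside voAMRWB_Init; the constant
         * name is taken from the voAudio headers. */
        if (api.Init(&handle, VO_AUDIO_CodingAMRWB, &userData) != VO_ERR_NONE)
            return -1;

        api.SetParam(handle, VO_PID_AMRWB_MODE, (VO_PTR)&mode);

        in.Buffer = (VO_PBYTE)pcm;
        in.Length = (VO_U32)pcmBytes;
        api.SetInputData(handle, &in);

        out.Buffer = outFrame;
        while (api.GetOutputData(handle, &out, &info) == VO_ERR_NONE) {
            /* out.Buffer holds out.Length bytes: one packed frame.
             * The loop ends with VO_ERR_INPUT_BUFFER_SMALL once less
             * than 640 bytes of PCM remain buffered. */
        }

        api.Uninit(handle);
        return 0;
    }
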
diff --git a/media/libstagefright/codecs/amrwbenc/src/voicefac.c b/media/libstagefright/codecs/amrwbenc/src/voicefac.c
index d890044..c9f48c2 100644
--- a/media/libstagefright/codecs/amrwbenc/src/voicefac.c
+++ b/media/libstagefright/codecs/amrwbenc/src/voicefac.c
@@ -26,65 +26,65 @@
 #include "math_op.h"
 
 Word16 voice_factor(                                  /* (o) Q15   : factor (-1=unvoiced to 1=voiced) */
-		Word16 exc[],                         /* (i) Q_exc : pitch excitation                 */
-		Word16 Q_exc,                         /* (i)       : exc format                       */
-		Word16 gain_pit,                      /* (i) Q14   : gain of pitch                    */
-		Word16 code[],                        /* (i) Q9    : Fixed codebook excitation        */
-		Word16 gain_code,                     /* (i) Q0    : gain of code                     */
-		Word16 L_subfr                        /* (i)       : subframe length                  */
-		)
+        Word16 exc[],                         /* (i) Q_exc : pitch excitation                 */
+        Word16 Q_exc,                         /* (i)       : exc format                       */
+        Word16 gain_pit,                      /* (i) Q14   : gain of pitch                    */
+        Word16 code[],                        /* (i) Q9    : Fixed codebook excitation        */
+        Word16 gain_code,                     /* (i) Q0    : gain of code                     */
+        Word16 L_subfr                        /* (i)       : subframe length                  */
+        )
 {
-	Word16 tmp, exp, ener1, exp1, ener2, exp2;
-	Word32 i, L_tmp;
+    Word16 tmp, exp, ener1, exp1, ener2, exp2;
+    Word32 i, L_tmp;
 
 #ifdef ASM_OPT               /* asm optimization branch */
-	ener1 = extract_h(Dot_product12_asm(exc, exc, L_subfr, &exp1));
+    ener1 = extract_h(Dot_product12_asm(exc, exc, L_subfr, &exp1));
 #else
-	ener1 = extract_h(Dot_product12(exc, exc, L_subfr, &exp1));
+    ener1 = extract_h(Dot_product12(exc, exc, L_subfr, &exp1));
 #endif
-	exp1 = exp1 - (Q_exc + Q_exc);
-	L_tmp = vo_L_mult(gain_pit, gain_pit);
-	exp = norm_l(L_tmp);
-	tmp = extract_h(L_tmp << exp);
-	ener1 = vo_mult(ener1, tmp);
-	exp1 = exp1 - exp - 10;        /* 10 -> gain_pit Q14 to Q9 */
+    exp1 = exp1 - (Q_exc + Q_exc);
+    L_tmp = vo_L_mult(gain_pit, gain_pit);
+    exp = norm_l(L_tmp);
+    tmp = extract_h(L_tmp << exp);
+    ener1 = vo_mult(ener1, tmp);
+    exp1 = exp1 - exp - 10;        /* 10 -> gain_pit Q14 to Q9 */
 
 #ifdef ASM_OPT                /* asm optimization branch */
-	ener2 = extract_h(Dot_product12_asm(code, code, L_subfr, &exp2));
+    ener2 = extract_h(Dot_product12_asm(code, code, L_subfr, &exp2));
 #else
-	ener2 = extract_h(Dot_product12(code, code, L_subfr, &exp2));
+    ener2 = extract_h(Dot_product12(code, code, L_subfr, &exp2));
 #endif
 
-	exp = norm_s(gain_code);
-	tmp = gain_code << exp;
-	tmp = vo_mult(tmp, tmp);
-	ener2 = vo_mult(ener2, tmp);
-	exp2 = exp2 - (exp + exp);
+    exp = norm_s(gain_code);
+    tmp = gain_code << exp;
+    tmp = vo_mult(tmp, tmp);
+    ener2 = vo_mult(ener2, tmp);
+    exp2 = exp2 - (exp + exp);
 
-	i = exp1 - exp2;
+    i = exp1 - exp2;
 
-	if (i >= 0)
-	{
-		ener1 = ener1 >> 1;
-		ener2 = ener2 >> (i + 1);
-	} else
-	{
-		ener1 = ener1 >> (1 - i);
-		ener2 = ener2 >> 1;
-	}
+    if (i >= 0)
+    {
+        ener1 = ener1 >> 1;
+        ener2 = ener2 >> (i + 1);
+    } else
+    {
+        ener1 = ener1 >> (1 - i);
+        ener2 = ener2 >> 1;
+    }
 
-	tmp = vo_sub(ener1, ener2);
-	ener1 = add1(add1(ener1, ener2), 1);
+    tmp = vo_sub(ener1, ener2);
+    ener1 = add1(add1(ener1, ener2), 1);
 
-	if (tmp >= 0)
-	{
-		tmp = div_s(tmp, ener1);
-	} else
-	{
-		tmp = vo_negate(div_s(vo_negate(tmp), ener1));
-	}
+    if (tmp >= 0)
+    {
+        tmp = div_s(tmp, ener1);
+    } else
+    {
+        tmp = vo_negate(div_s(vo_negate(tmp), ener1));
+    }
 
-	return (tmp);
+    return (tmp);
 }
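
The exponent juggling above implements a simple normalized energy contrast; a floating-point reference (illustrative only):

    /* voice factor = (E_pitch - E_code) / (E_pitch + E_code), where
     * E_pitch = gain_pit^2 * sum(exc^2) and
     * E_code  = gain_code^2 * sum(code^2);
     * +1 means purely voiced, -1 purely unvoiced. */
    static float voice_factor_ref(const float *exc, float gain_pit,
                                  const float *code, float gain_code,
                                  int n)
    {
        float e1 = 0.0f, e2 = 0.0f;
        int i;
        for (i = 0; i < n; i++) e1 += exc[i] * exc[i];
        for (i = 0; i < n; i++) e2 += code[i] * code[i];
        e1 *= gain_pit * gain_pit;
        e2 *= gain_code * gain_code;
        return (e1 - e2) / (e1 + e2 + 1e-9f);
    }
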
 
 
diff --git a/media/libstagefright/codecs/amrwbenc/src/wb_vad.c b/media/libstagefright/codecs/amrwbenc/src/wb_vad.c
index 2beaefd..866a69c 100644
--- a/media/libstagefright/codecs/amrwbenc/src/wb_vad.c
+++ b/media/libstagefright/codecs/amrwbenc/src/wb_vad.c
@@ -44,30 +44,30 @@
 *********************************************************************************/
 
 static Word16 ilog2(                       /* return: output value of the log2 */
-		Word16 mant                        /* i: value to be converted */
-		)
+        Word16 mant                        /* i: value to be converted */
+        )
 {
-	Word16 ex, ex2, res;
-	Word32 i, l_temp;
+    Word16 ex, ex2, res;
+    Word32 i, l_temp;
 
-	if (mant <= 0)
-	{
-		mant = 1;
-	}
-	ex = norm_s(mant);
-	mant = mant << ex;
+    if (mant <= 0)
+    {
+        mant = 1;
+    }
+    ex = norm_s(mant);
+    mant = mant << ex;
 
-	for (i = 0; i < 3; i++)
-		mant = vo_mult(mant, mant);
-	l_temp = vo_L_mult(mant, mant);
+    for (i = 0; i < 3; i++)
+        mant = vo_mult(mant, mant);
+    l_temp = vo_L_mult(mant, mant);
 
-	ex2 = norm_l(l_temp);
-	mant = extract_h(l_temp << ex2);
+    ex2 = norm_l(l_temp);
+    mant = extract_h(l_temp << ex2);
 
-	res = (ex + 16) << 10;
-	res = add1(res, (ex2 << 6));
-	res = vo_sub(add1(res, 127), (mant >> 8));
-	return (res);
+    res = (ex + 16) << 10;
+    res = add1(res, (ex2 << 6));
+    res = vo_sub(add1(res, 127), (mant >> 8));
+    return (res);
 }
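
What ilog2() evaluates is easier to see in floating point. A reference sketch (illustrative; the Q10 relation and the worked check were derived by stepping through the fixed-point code, not taken from a spec):

    #include <math.h>

    /* ilog2(mant) ~= 1024 * (31 - log2(mant)), i.e. log2(2^31/mant)
     * in Q10. Worked check for mant = 16384 = 2^14:
     *     1024 * (31 - 14) = 17408; the code above returns 17407. */
    static float ilog2_ref(float mant)
    {
        if (mant < 1.0f)
            mant = 1.0f;            /* mirrors the mant <= 0 guard */
        return 1024.0f * (31.0f - log2f(mant));
    }
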
 
 /******************************************************************************
@@ -79,23 +79,23 @@
 *******************************************************************************/
 
 static void filter5(
-		Word16 * in0,                         /* i/o : input values; output low-pass part  */
-		Word16 * in1,                         /* i/o : input values; output high-pass part */
-		Word16 data[]                         /* i/o : filter memory                       */
-		)
+        Word16 * in0,                         /* i/o : input values; output low-pass part  */
+        Word16 * in1,                         /* i/o : input values; output high-pass part */
+        Word16 data[]                         /* i/o : filter memory                       */
+        )
 {
-	Word16 temp0, temp1, temp2;
+    Word16 temp0, temp1, temp2;
 
-	temp0 = vo_sub(*in0, vo_mult(COEFF5_1, data[0]));
-	temp1 = add1(data[0], vo_mult(COEFF5_1, temp0));
-	data[0] = temp0;
+    temp0 = vo_sub(*in0, vo_mult(COEFF5_1, data[0]));
+    temp1 = add1(data[0], vo_mult(COEFF5_1, temp0));
+    data[0] = temp0;
 
-	temp0 = vo_sub(*in1, vo_mult(COEFF5_2, data[1]));
-	temp2 = add1(data[1], vo_mult(COEFF5_2, temp0));
-	data[1] = temp0;
+    temp0 = vo_sub(*in1, vo_mult(COEFF5_2, data[1]));
+    temp2 = add1(data[1], vo_mult(COEFF5_2, temp0));
+    data[1] = temp0;
 
-	*in0 = extract_h((vo_L_add(temp1, temp2) << 15));
-	*in1 = extract_h((vo_L_sub(temp1, temp2) << 15));
+    *in0 = extract_h((vo_L_add(temp1, temp2) << 15));
+    *in1 = extract_h((vo_L_sub(temp1, temp2) << 15));
 }
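
filter5() above (and filter3() below, which reduces one branch to a pass-through) is the classic two-allpass half-band split; the even/odd indexing in filter_bank() supplies the polyphase decimation. A floating-point sketch (illustrative; the coefficient values are whatever COEFF5_*/COEFF3 provide):

    /* One first-order allpass section A(z) = (c + z^-1)/(1 + c z^-1);
     * its magnitude response is 1 at every frequency, only the phase
     * depends on c. */
    static float allpass1(float x, float c, float *state)
    {
        float t = x - c * *state;
        float y = *state + c * t;
        *state = t;
        return y;
    }

    /* Half-band split: the half-sum of the two allpass branches is
     * the low band, the half-difference the high band -- matching
     * the extract_h((a +/- b) << 15) lines above. */
    static void halfband_split(float *in0, float *in1, float c0,
                               float c1, float *s0, float *s1)
    {
        float a = allpass1(*in0, c0, s0);
        float b = allpass1(*in1, c1, s1);
        *in0 = 0.5f * (a + b);
        *in1 = 0.5f * (a - b);
    }
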
 
 /******************************************************************************
@@ -107,19 +107,19 @@
 *******************************************************************************/
 
 static void filter3(
-		Word16 * in0,                         /* i/o : input values; output low-pass part  */
-		Word16 * in1,                         /* i/o : input values; output high-pass part */
-		Word16 * data                         /* i/o : filter memory                       */
-		)
+        Word16 * in0,                         /* i/o : input values; output low-pass part  */
+        Word16 * in1,                         /* i/o : input values; output high-pass part */
+        Word16 * data                         /* i/o : filter memory                       */
+        )
 {
-	Word16 temp1, temp2;
+    Word16 temp1, temp2;
 
-	temp1 = vo_sub(*in1, vo_mult(COEFF3, *data));
-	temp2 = add1(*data, vo_mult(COEFF3, temp1));
-	*data = temp1;
+    temp1 = vo_sub(*in1, vo_mult(COEFF3, *data));
+    temp2 = add1(*data, vo_mult(COEFF3, temp1));
+    *data = temp1;
 
-	*in1 = extract_h((vo_L_sub(*in0, temp2) << 15));
-	*in0 = extract_h((vo_L_add(*in0, temp2) << 15));
+    *in1 = extract_h((vo_L_sub(*in0, temp2) << 15));
+    *in0 = extract_h((vo_L_add(*in0, temp2) << 15));
 }
 
 /******************************************************************************
@@ -135,36 +135,36 @@
 ******************************************************************************/
 
 static Word16 level_calculation(                      /* return: signal level */
-		Word16 data[],                        /* i   : signal buffer                                    */
-		Word16 * sub_level,                   /* i   : level calculated at the end of the previous frame*/
-		                                      /* o   : level of signal calculated from the last         */
-		                                      /*       (count2 - count1) samples                        */
-		Word16 count1,                        /* i   : number of samples to be counted                  */
-		Word16 count2,                        /* i   : number of samples to be counted                  */
-		Word16 ind_m,                         /* i   : step size for the index of the data buffer       */
-		Word16 ind_a,                         /* i   : starting index of the data buffer                */
-		Word16 scale                          /* i   : scaling for the level calculation                */
-		)
+        Word16 data[],                        /* i   : signal buffer                                    */
+        Word16 * sub_level,                   /* i   : level calculated at the end of the previous frame*/
+                                              /* o   : level of signal calculated from the last         */
+                                              /*       (count2 - count1) samples                        */
+        Word16 count1,                        /* i   : number of samples to be counted                  */
+        Word16 count2,                        /* i   : number of samples to be counted                  */
+        Word16 ind_m,                         /* i   : step size for the index of the data buffer       */
+        Word16 ind_a,                         /* i   : starting index of the data buffer                */
+        Word16 scale                          /* i   : scaling for the level calculation                */
+        )
 {
-	Word32 i, l_temp1, l_temp2;
-	Word16 level;
+    Word32 i, l_temp1, l_temp2;
+    Word16 level;
 
-	l_temp1 = 0L;
-	for (i = count1; i < count2; i++)
-	{
-		l_temp1 += (abs_s(data[ind_m * i + ind_a])<<1);
-	}
+    l_temp1 = 0L;
+    for (i = count1; i < count2; i++)
+    {
+        l_temp1 += (abs_s(data[ind_m * i + ind_a])<<1);
+    }
 
-	l_temp2 = vo_L_add(l_temp1, L_shl(*sub_level, 16 - scale));
-	*sub_level = extract_h(L_shl(l_temp1, scale));
+    l_temp2 = vo_L_add(l_temp1, L_shl(*sub_level, 16 - scale));
+    *sub_level = extract_h(L_shl(l_temp1, scale));
 
-	for (i = 0; i < count1; i++)
-	{
-		l_temp2 += (abs_s(data[ind_m * i + ind_a])<<1);
-	}
-	level = extract_h(L_shl2(l_temp2, scale));
+    for (i = 0; i < count1; i++)
+    {
+        l_temp2 += (abs_s(data[ind_m * i + ind_a])<<1);
+    }
+    level = extract_h(L_shl2(l_temp2, scale));
 
-	return level;
+    return level;
 }
 
 /******************************************************************************
@@ -176,75 +176,75 @@
 *******************************************************************************/
 
 static void filter_bank(
-		VadVars * st,                         /* i/o : State struct               */
-		Word16 in[],                          /* i   : input frame                */
-		Word16 level[]                        /* o   : signal levels at each band */
-		)
+        VadVars * st,                         /* i/o : State struct               */
+        Word16 in[],                          /* i   : input frame                */
+        Word16 level[]                        /* o   : signal levels at each band */
+        )
 {
-	Word32 i;
-	Word16 tmp_buf[FRAME_LEN];
+    Word32 i;
+    Word16 tmp_buf[FRAME_LEN];
 
-	/* shift input 1 bit down for safe scaling */
-	for (i = 0; i < FRAME_LEN; i++)
-	{
-		tmp_buf[i] = in[i] >> 1;
-	}
+    /* shift input 1 bit down for safe scaling */
+    for (i = 0; i < FRAME_LEN; i++)
+    {
+        tmp_buf[i] = in[i] >> 1;
+    }
 
-	/* run the filter bank */
-	for (i = 0; i < 128; i++)
-	{
-		filter5(&tmp_buf[2 * i], &tmp_buf[2 * i + 1], st->a_data5[0]);
-	}
-	for (i = 0; i < 64; i++)
-	{
-		filter5(&tmp_buf[4 * i], &tmp_buf[4 * i + 2], st->a_data5[1]);
-		filter5(&tmp_buf[4 * i + 1], &tmp_buf[4 * i + 3], st->a_data5[2]);
-	}
-	for (i = 0; i < 32; i++)
-	{
-		filter5(&tmp_buf[8 * i], &tmp_buf[8 * i + 4], st->a_data5[3]);
-		filter5(&tmp_buf[8 * i + 2], &tmp_buf[8 * i + 6], st->a_data5[4]);
-		filter3(&tmp_buf[8 * i + 3], &tmp_buf[8 * i + 7], &st->a_data3[0]);
-	}
-	for (i = 0; i < 16; i++)
-	{
-		filter3(&tmp_buf[16 * i + 0], &tmp_buf[16 * i + 8], &st->a_data3[1]);
-		filter3(&tmp_buf[16 * i + 4], &tmp_buf[16 * i + 12], &st->a_data3[2]);
-		filter3(&tmp_buf[16 * i + 6], &tmp_buf[16 * i + 14], &st->a_data3[3]);
-	}
+    /* run the filter bank */
+    for (i = 0; i < 128; i++)
+    {
+        filter5(&tmp_buf[2 * i], &tmp_buf[2 * i + 1], st->a_data5[0]);
+    }
+    for (i = 0; i < 64; i++)
+    {
+        filter5(&tmp_buf[4 * i], &tmp_buf[4 * i + 2], st->a_data5[1]);
+        filter5(&tmp_buf[4 * i + 1], &tmp_buf[4 * i + 3], st->a_data5[2]);
+    }
+    for (i = 0; i < 32; i++)
+    {
+        filter5(&tmp_buf[8 * i], &tmp_buf[8 * i + 4], st->a_data5[3]);
+        filter5(&tmp_buf[8 * i + 2], &tmp_buf[8 * i + 6], st->a_data5[4]);
+        filter3(&tmp_buf[8 * i + 3], &tmp_buf[8 * i + 7], &st->a_data3[0]);
+    }
+    for (i = 0; i < 16; i++)
+    {
+        filter3(&tmp_buf[16 * i + 0], &tmp_buf[16 * i + 8], &st->a_data3[1]);
+        filter3(&tmp_buf[16 * i + 4], &tmp_buf[16 * i + 12], &st->a_data3[2]);
+        filter3(&tmp_buf[16 * i + 6], &tmp_buf[16 * i + 14], &st->a_data3[3]);
+    }
 
-	for (i = 0; i < 8; i++)
-	{
-		filter3(&tmp_buf[32 * i + 0], &tmp_buf[32 * i + 16], &st->a_data3[4]);
-		filter3(&tmp_buf[32 * i + 8], &tmp_buf[32 * i + 24], &st->a_data3[5]);
-	}
+    for (i = 0; i < 8; i++)
+    {
+        filter3(&tmp_buf[32 * i + 0], &tmp_buf[32 * i + 16], &st->a_data3[4]);
+        filter3(&tmp_buf[32 * i + 8], &tmp_buf[32 * i + 24], &st->a_data3[5]);
+    }
 
-	/* calculate levels in each frequency band */
+    /* calculate levels in each frequency band */
 
-	/* 4800 - 6400 Hz */
-	level[11] = level_calculation(tmp_buf, &st->sub_level[11], 16, 64, 4, 1, 14);
-	/* 4000 - 4800 Hz */
-	level[10] = level_calculation(tmp_buf, &st->sub_level[10], 8, 32, 8, 7, 15);
-	/* 3200 - 4000 Hz */
-	level[9] = level_calculation(tmp_buf, &st->sub_level[9],8, 32, 8, 3, 15);
-	/* 2400 - 3200 Hz */
-	level[8] = level_calculation(tmp_buf, &st->sub_level[8],8, 32, 8, 2, 15);
-	/* 2000 - 2400 Hz */
-	level[7] = level_calculation(tmp_buf, &st->sub_level[7],4, 16, 16, 14, 16);
-	/* 1600 - 2000 Hz */
-	level[6] = level_calculation(tmp_buf, &st->sub_level[6],4, 16, 16, 6, 16);
-	/* 1200 - 1600 Hz */
-	level[5] = level_calculation(tmp_buf, &st->sub_level[5],4, 16, 16, 4, 16);
-	/* 800 - 1200 Hz */
-	level[4] = level_calculation(tmp_buf, &st->sub_level[4],4, 16, 16, 12, 16);
-	/* 600 - 800 Hz */
-	level[3] = level_calculation(tmp_buf, &st->sub_level[3],2, 8, 32, 8, 17);
-	/* 400 - 600 Hz */
-	level[2] = level_calculation(tmp_buf, &st->sub_level[2],2, 8, 32, 24, 17);
-	/* 200 - 400 Hz */
-	level[1] = level_calculation(tmp_buf, &st->sub_level[1],2, 8, 32, 16, 17);
-	/* 0 - 200 Hz */
-	level[0] = level_calculation(tmp_buf, &st->sub_level[0],2, 8, 32, 0, 17);
+    /* 4800 - 6400 Hz */
+    level[11] = level_calculation(tmp_buf, &st->sub_level[11], 16, 64, 4, 1, 14);
+    /* 4000 - 4800 Hz */
+    level[10] = level_calculation(tmp_buf, &st->sub_level[10], 8, 32, 8, 7, 15);
+    /* 3200 - 4000 Hz */
+    level[9] = level_calculation(tmp_buf, &st->sub_level[9], 8, 32, 8, 3, 15);
+    /* 2400 - 3200 Hz */
+    level[8] = level_calculation(tmp_buf, &st->sub_level[8], 8, 32, 8, 2, 15);
+    /* 2000 - 2400 Hz */
+    level[7] = level_calculation(tmp_buf, &st->sub_level[7], 4, 16, 16, 14, 16);
+    /* 1600 - 2000 Hz */
+    level[6] = level_calculation(tmp_buf, &st->sub_level[6], 4, 16, 16, 6, 16);
+    /* 1200 - 1600 Hz */
+    level[5] = level_calculation(tmp_buf, &st->sub_level[5], 4, 16, 16, 4, 16);
+    /* 800 - 1200 Hz */
+    level[4] = level_calculation(tmp_buf, &st->sub_level[4], 4, 16, 16, 12, 16);
+    /* 600 - 800 Hz */
+    level[3] = level_calculation(tmp_buf, &st->sub_level[3], 2, 8, 32, 8, 17);
+    /* 400 - 600 Hz */
+    level[2] = level_calculation(tmp_buf, &st->sub_level[2], 2, 8, 32, 24, 17);
+    /* 200 - 400 Hz */
+    level[1] = level_calculation(tmp_buf, &st->sub_level[1], 2, 8, 32, 16, 17);
+    /* 0 - 200 Hz */
+    level[0] = level_calculation(tmp_buf, &st->sub_level[0], 2, 8, 32, 0, 17);
 }
 
 /******************************************************************************
@@ -255,86 +255,86 @@
 *******************************************************************************/
 
 static void update_cntrl(
-		VadVars * st,                         /* i/o : State structure                    */
-		Word16 level[]                        /* i   : sub-band levels of the input frame */
-		)
+        VadVars * st,                         /* i/o : State structure                    */
+        Word16 level[]                        /* i   : sub-band levels of the input frame */
+        )
 {
-	Word32 i;
-	Word16 num, temp, stat_rat, exp, denom;
-	Word16 alpha;
+    Word32 i;
+    Word16 num, temp, stat_rat, exp, denom;
+    Word16 alpha;
 
-	/* if a tone has been detected for a while, initialize stat_count */
-	if (sub((Word16) (st->tone_flag & 0x7c00), 0x7c00) == 0)
-	{
-		st->stat_count = STAT_COUNT;
-	} else
-	{
-		/* if 8 last vad-decisions have been "0", reinitialize stat_count */
-		if ((st->vadreg & 0x7f80) == 0)
-		{
-			st->stat_count = STAT_COUNT;
-		} else
-		{
-			stat_rat = 0;
-			for (i = 0; i < COMPLEN; i++)
-			{
-				if(level[i] > st->ave_level[i])
-				{
-					num = level[i];
-					denom = st->ave_level[i];
-				} else
-				{
-					num = st->ave_level[i];
-					denom = level[i];
-				}
-				/* Limit nimimum value of num and denom to STAT_THR_LEVEL */
-				if(num < STAT_THR_LEVEL)
-				{
-					num = STAT_THR_LEVEL;
-				}
-				if(denom < STAT_THR_LEVEL)
-				{
-					denom = STAT_THR_LEVEL;
-				}
-				exp = norm_s(denom);
-				denom = denom << exp;
+    /* if a tone has been detected for a while, initialize stat_count */
+    if (sub((Word16) (st->tone_flag & 0x7c00), 0x7c00) == 0)
+    {
+        st->stat_count = STAT_COUNT;
+    } else
+    {
+        /* if 8 last vad-decisions have been "0", reinitialize stat_count */
+        if ((st->vadreg & 0x7f80) == 0)
+        {
+            st->stat_count = STAT_COUNT;
+        } else
+        {
+            stat_rat = 0;
+            for (i = 0; i < COMPLEN; i++)
+            {
+                if(level[i] > st->ave_level[i])
+                {
+                    num = level[i];
+                    denom = st->ave_level[i];
+                } else
+                {
+                    num = st->ave_level[i];
+                    denom = level[i];
+                }
+                /* Limit minimum value of num and denom to STAT_THR_LEVEL */
+                if(num < STAT_THR_LEVEL)
+                {
+                    num = STAT_THR_LEVEL;
+                }
+                if(denom < STAT_THR_LEVEL)
+                {
+                    denom = STAT_THR_LEVEL;
+                }
+                exp = norm_s(denom);
+                denom = denom << exp;
 
-				/* stat_rat = num/denom * 64 */
-				temp = div_s(num >> 1, denom);
-				stat_rat = add1(stat_rat, shr(temp, (8 - exp)));
-			}
+                /* stat_rat = num/denom * 64 */
+                temp = div_s(num >> 1, denom);
+                stat_rat = add1(stat_rat, shr(temp, (8 - exp)));
+            }
 
-			/* compare stat_rat with a threshold and update stat_count */
-			if(stat_rat > STAT_THR)
-			{
-				st->stat_count = STAT_COUNT;
-			} else
-			{
-				if ((st->vadreg & 0x4000) != 0)
-				{
+            /* compare stat_rat with a threshold and update stat_count */
+            if(stat_rat > STAT_THR)
+            {
+                st->stat_count = STAT_COUNT;
+            } else
+            {
+                if ((st->vadreg & 0x4000) != 0)
+                {
 
-					if (st->stat_count != 0)
-					{
-						st->stat_count = st->stat_count - 1;
-					}
-				}
-			}
-		}
-	}
+                    if (st->stat_count != 0)
+                    {
+                        st->stat_count = st->stat_count - 1;
+                    }
+                }
+            }
+        }
+    }
 
-	/* Update average amplitude estimate for stationarity estimation */
-	alpha = ALPHA4;
-	if(st->stat_count == STAT_COUNT)
-	{
-		alpha = 32767;
-	} else if ((st->vadreg & 0x4000) == 0)
-	{
-		alpha = ALPHA5;
-	}
-	for (i = 0; i < COMPLEN; i++)
-	{
-		st->ave_level[i] = add1(st->ave_level[i], vo_mult_r(alpha, vo_sub(level[i], st->ave_level[i])));
-	}
+    /* Update average amplitude estimate for stationarity estimation */
+    alpha = ALPHA4;
+    if(st->stat_count == STAT_COUNT)
+    {
+        alpha = 32767;
+    } else if ((st->vadreg & 0x4000) == 0)
+    {
+        alpha = ALPHA5;
+    }
+    for (i = 0; i < COMPLEN; i++)
+    {
+        st->ave_level[i] = add1(st->ave_level[i], vo_mult_r(alpha, vo_sub(level[i], st->ave_level[i])));
+    }
 }
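
The inner loop above forms a per-band level ratio; in floating point (illustrative sketch — the fixed-point version keeps the running sum in Q6, i.e. scaled by 64):

    /* Stationarity measure: sum over sub-bands of
     * max(level, ave) / min(level, ave), with both operands floored
     * to avoid division blow-ups. Large values mean the spectrum is
     * still changing, so the stationarity counter is restarted. */
    static float stat_ratio(const float *level, const float *ave,
                            int nbands, float level_floor)
    {
        float r = 0.0f;
        int i;
        for (i = 0; i < nbands; i++) {
            float num = (level[i] > ave[i]) ? level[i] : ave[i];
            float den = (level[i] > ave[i]) ? ave[i]   : level[i];
            if (num < level_floor) num = level_floor;
            if (den < level_floor) den = level_floor;
            r += num / den;
        }
        return r;
    }
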
 
 /******************************************************************************
@@ -345,38 +345,38 @@
 *******************************************************************************/
 
 static Word16 hangover_addition(                      /* return: VAD_flag indicating final VAD decision */
-		VadVars * st,                         /* i/o : State structure                     */
-		Word16 low_power,                     /* i   : flag power of the input frame    */
-		Word16 hang_len,                      /* i   : hangover length */
-		Word16 burst_len                      /* i   : minimum burst length for hangover addition */
-		)
+        VadVars * st,                         /* i/o : State structure                     */
+        Word16 low_power,                     /* i   : flag power of the input frame    */
+        Word16 hang_len,                      /* i   : hangover length */
+        Word16 burst_len                      /* i   : minimum burst length for hangover addition */
+        )
 {
-	/* if the input power (pow_sum) is lower than a threshold, clear counters and set VAD_flag to "0"         */
-	if (low_power != 0)
-	{
-		st->burst_count = 0;
-		st->hang_count = 0;
-		return 0;
-	}
-	/* update the counters (hang_count, burst_count) */
-	if ((st->vadreg & 0x4000) != 0)
-	{
-		st->burst_count = st->burst_count + 1;
-		if(st->burst_count >= burst_len)
-		{
-			st->hang_count = hang_len;
-		}
-		return 1;
-	} else
-	{
-		st->burst_count = 0;
-		if (st->hang_count > 0)
-		{
-			st->hang_count = st->hang_count - 1;
-			return 1;
-		}
-	}
-	return 0;
+    /* if the input power (pow_sum) is lower than a threshold, clear counters and set VAD_flag to "0"         */
+    if (low_power != 0)
+    {
+        st->burst_count = 0;
+        st->hang_count = 0;
+        return 0;
+    }
+    /* update the counters (hang_count, burst_count) */
+    if ((st->vadreg & 0x4000) != 0)
+    {
+        st->burst_count = st->burst_count + 1;
+        if(st->burst_count >= burst_len)
+        {
+            st->hang_count = hang_len;
+        }
+        return 1;
+    } else
+    {
+        st->burst_count = 0;
+        if (st->hang_count > 0)
+        {
+            st->hang_count = st->hang_count - 1;
+            return 1;
+        }
+    }
+    return 0;
 }
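
The control flow above is a small state machine; restated in plain C (illustrative — raw_vad corresponds to the 0x4000 bit of vadreg, the counters to the VadVars fields, and the low_power early-out is omitted):

    static int vad_with_hangover(int raw_vad, int hang_len, int burst_len,
                                 int *burst_count, int *hang_count)
    {
        if (raw_vad) {
            if (++*burst_count >= burst_len)
                *hang_count = hang_len;   /* arm the hangover       */
            return 1;
        }
        *burst_count = 0;
        if (*hang_count > 0) {
            --*hang_count;                /* keep reporting speech  */
            return 1;
        }
        return 0;
    }
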
 
 /******************************************************************************
@@ -387,66 +387,66 @@
 *******************************************************************************/
 
 static void noise_estimate_update(
-		VadVars * st,                         /* i/o : State structure                       */
-		Word16 level[]                        /* i   : sub-band levels of the input frame */
-		)
+        VadVars * st,                         /* i/o : State structure                       */
+        Word16 level[]                        /* i   : sub-band levels of the input frame */
+        )
 {
-	Word32 i;
-	Word16 alpha_up, alpha_down, bckr_add = 2;
+    Word32 i;
+    Word16 alpha_up, alpha_down, bckr_add = 2;
 
-	/* Control update of bckr_est[] */
-	update_cntrl(st, level);
+    /* Control update of bckr_est[] */
+    update_cntrl(st, level);
 
-	/* Choose update speed */
-	if ((0x7800 & st->vadreg) == 0)
-	{
-		alpha_up = ALPHA_UP1;
-		alpha_down = ALPHA_DOWN1;
-	} else
-	{
-		if (st->stat_count == 0)
-		{
-			alpha_up = ALPHA_UP2;
-			alpha_down = ALPHA_DOWN2;
-		} else
-		{
-			alpha_up = 0;
-			alpha_down = ALPHA3;
-			bckr_add = 0;
-		}
-	}
+    /* Choose update speed */
+    if ((0x7800 & st->vadreg) == 0)
+    {
+        alpha_up = ALPHA_UP1;
+        alpha_down = ALPHA_DOWN1;
+    } else
+    {
+        if (st->stat_count == 0)
+        {
+            alpha_up = ALPHA_UP2;
+            alpha_down = ALPHA_DOWN2;
+        } else
+        {
+            alpha_up = 0;
+            alpha_down = ALPHA3;
+            bckr_add = 0;
+        }
+    }
 
-	/* Update noise estimate (bckr_est) */
-	for (i = 0; i < COMPLEN; i++)
-	{
-		Word16 temp;
-		temp = (st->old_level[i] - st->bckr_est[i]);
+    /* Update noise estimate (bckr_est) */
+    for (i = 0; i < COMPLEN; i++)
+    {
+        Word16 temp;
+        temp = (st->old_level[i] - st->bckr_est[i]);
 
-		if (temp < 0)
-		{                                  /* update downwards */
-			st->bckr_est[i] = add1(-2, add(st->bckr_est[i],vo_mult_r(alpha_down, temp)));
-			/* limit minimum value of the noise estimate to NOISE_MIN */
-			if(st->bckr_est[i] < NOISE_MIN)
-			{
-				st->bckr_est[i] = NOISE_MIN;
-			}
-		} else
-		{                                  /* update upwards */
-			st->bckr_est[i] = add1(bckr_add, add1(st->bckr_est[i],vo_mult_r(alpha_up, temp)));
+        if (temp < 0)
+        {                                  /* update downwards */
+            st->bckr_est[i] = add1(-2, add(st->bckr_est[i],vo_mult_r(alpha_down, temp)));
+            /* limit minimum value of the noise estimate to NOISE_MIN */
+            if(st->bckr_est[i] < NOISE_MIN)
+            {
+                st->bckr_est[i] = NOISE_MIN;
+            }
+        } else
+        {                                  /* update upwards */
+            st->bckr_est[i] = add1(bckr_add, add1(st->bckr_est[i],vo_mult_r(alpha_up, temp)));
 
-			/* limit maximum value of the noise estimate to NOISE_MAX */
-			if(st->bckr_est[i] > NOISE_MAX)
-			{
-				st->bckr_est[i] = NOISE_MAX;
-			}
-		}
-	}
+            /* limit maximum value of the noise estimate to NOISE_MAX */
+            if(st->bckr_est[i] > NOISE_MAX)
+            {
+                st->bckr_est[i] = NOISE_MAX;
+            }
+        }
+    }
 
-	/* Update signal levels of the previous frame (old_level) */
-	for (i = 0; i < COMPLEN; i++)
-	{
-		st->old_level[i] = level[i];
-	}
+    /* Update signal levels of the previous frame (old_level) */
+    for (i = 0; i < COMPLEN; i++)
+    {
+        st->old_level[i] = level[i];
+    }
 }
 
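noise_estimate_update() is, per sub-band, a first-order smoother with
asymmetric gains chosen from the recent decision history: the estimate is
pulled toward last frame's level with one gain going down and another going
up, then clamped to [NOISE_MIN, NOISE_MAX]. A floating-point sketch of the
same recursion (N_BANDS stands in for COMPLEN, the constants are illustrative,
and the small bckr_add bias is omitted):

/* Float sketch of the asymmetric background-noise tracker. */
#define N_BANDS     12
#define NOISE_MIN_F 64.0f
#define NOISE_MAX_F 20000.0f

static void noise_update(float bckr[N_BANDS], const float old_level[N_BANDS],
                         float alpha_up, float alpha_down)
{
    for (int i = 0; i < N_BANDS; i++) {
        float diff = old_level[i] - bckr[i];
        /* separate gains for downward and upward adaptation */
        bckr[i] += (diff < 0.0f ? alpha_down : alpha_up) * diff;
        if (bckr[i] < NOISE_MIN_F) bckr[i] = NOISE_MIN_F;
        if (bckr[i] > NOISE_MAX_F) bckr[i] = NOISE_MAX_F;
    }
}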
 /******************************************************************************
@@ -457,100 +457,100 @@
 *******************************************************************************/
 
 static Word16 vad_decision(                           /* return value : VAD_flag */
-		VadVars * st,                         /* i/o : State structure                       */
-		Word16 level[COMPLEN],                /* i   : sub-band levels of the input frame */
-		Word32 pow_sum                        /* i   : power of the input frame           */
-		)
+        VadVars * st,                         /* i/o : State structure                       */
+        Word16 level[COMPLEN],                /* i   : sub-band levels of the input frame */
+        Word32 pow_sum                        /* i   : power of the input frame           */
+        )
 {
-	Word32 i;
-	Word32 L_snr_sum;
-	Word32 L_temp;
-	Word16 vad_thr, temp, noise_level;
-	Word16 low_power_flag;
-	Word16 hang_len, burst_len;
-	Word16 ilog2_speech_level, ilog2_noise_level;
-	Word16 temp2;
+    Word32 i;
+    Word32 L_snr_sum;
+    Word32 L_temp;
+    Word16 vad_thr, temp, noise_level;
+    Word16 low_power_flag;
+    Word16 hang_len, burst_len;
+    Word16 ilog2_speech_level, ilog2_noise_level;
+    Word16 temp2;
 
-	/* Calculate squared sum of the input levels (level) divided by the background noise components
-	 * (bckr_est). */
-	L_snr_sum = 0;
-	for (i = 0; i < COMPLEN; i++)
-	{
-		Word16 exp;
+    /* Calculate squared sum of the input levels (level) divided by the background noise components
+     * (bckr_est). */
+    L_snr_sum = 0;
+    for (i = 0; i < COMPLEN; i++)
+    {
+        Word16 exp;
 
-		exp = norm_s(st->bckr_est[i]);
-		temp = (st->bckr_est[i] << exp);
-		temp = div_s((level[i] >> 1), temp);
-		temp = shl(temp, (exp - (UNIRSHFT - 1)));
-		L_snr_sum = L_mac(L_snr_sum, temp, temp);
-	}
+        exp = norm_s(st->bckr_est[i]);
+        temp = (st->bckr_est[i] << exp);
+        temp = div_s((level[i] >> 1), temp);
+        temp = shl(temp, (exp - (UNIRSHFT - 1)));
+        L_snr_sum = L_mac(L_snr_sum, temp, temp);
+    }
 
-	/* Calculate average level of estimated background noise */
-	L_temp = 0;
-	for (i = 1; i < COMPLEN; i++)          /* ignore lowest band */
-	{
-		L_temp = vo_L_add(L_temp, st->bckr_est[i]);
-	}
+    /* Calculate average level of estimated background noise */
+    L_temp = 0;
+    for (i = 1; i < COMPLEN; i++)          /* ignore lowest band */
+    {
+        L_temp = vo_L_add(L_temp, st->bckr_est[i]);
+    }
 
-	noise_level = extract_h((L_temp << 12));
-	/* if SNR is lower than a threshold (MIN_SPEECH_SNR), and increase speech_level */
-	temp = vo_mult(noise_level, MIN_SPEECH_SNR) << 3;
+    noise_level = extract_h((L_temp << 12));
+    /* if SNR is lower than a threshold (MIN_SPEECH_SNR), increase speech_level */
+    temp = vo_mult(noise_level, MIN_SPEECH_SNR) << 3;
 
-	if(st->speech_level < temp)
-	{
-		st->speech_level = temp;
-	}
-	ilog2_noise_level = ilog2(noise_level);
+    if(st->speech_level < temp)
+    {
+        st->speech_level = temp;
+    }
+    ilog2_noise_level = ilog2(noise_level);
 
-	/* If SNR is very poor, speech_level is probably corrupted by noise level. This is correctred by
-	 * subtracting MIN_SPEECH_SNR*noise_level from speech level */
-	ilog2_speech_level = ilog2(st->speech_level - temp);
+    /* If SNR is very poor, speech_level is probably corrupted by noise level. This is corrected by
+     * subtracting MIN_SPEECH_SNR*noise_level from speech level */
+    ilog2_speech_level = ilog2(st->speech_level - temp);
 
-	temp = add1(vo_mult(NO_SLOPE, (ilog2_noise_level - NO_P1)), THR_HIGH);
+    temp = add1(vo_mult(NO_SLOPE, (ilog2_noise_level - NO_P1)), THR_HIGH);
 
-	temp2 = add1(SP_CH_MIN, vo_mult(SP_SLOPE, (ilog2_speech_level - SP_P1)));
-	if (temp2 < SP_CH_MIN)
-	{
-		temp2 = SP_CH_MIN;
-	}
-	if (temp2 > SP_CH_MAX)
-	{
-		temp2 = SP_CH_MAX;
-	}
-	vad_thr = temp + temp2;
+    temp2 = add1(SP_CH_MIN, vo_mult(SP_SLOPE, (ilog2_speech_level - SP_P1)));
+    if (temp2 < SP_CH_MIN)
+    {
+        temp2 = SP_CH_MIN;
+    }
+    if (temp2 > SP_CH_MAX)
+    {
+        temp2 = SP_CH_MAX;
+    }
+    vad_thr = temp + temp2;
 
-	if(vad_thr < THR_MIN)
-	{
-		vad_thr = THR_MIN;
-	}
-	/* Shift VAD decision register */
-	st->vadreg = (st->vadreg >> 1);
+    if(vad_thr < THR_MIN)
+    {
+        vad_thr = THR_MIN;
+    }
+    /* Shift VAD decision register */
+    st->vadreg = (st->vadreg >> 1);
 
-	/* Make intermediate VAD decision */
-	if(L_snr_sum > vo_L_mult(vad_thr, (512 * COMPLEN)))
-	{
-		st->vadreg = (Word16) (st->vadreg | 0x4000);
-	}
-	/* check if the input power (pow_sum) is lower than a threshold" */
-	if(pow_sum < VAD_POW_LOW)
-	{
-		low_power_flag = 1;
-	} else
-	{
-		low_power_flag = 0;
-	}
-	/* Update background noise estimates */
-	noise_estimate_update(st, level);
+    /* Make intermediate VAD decision */
+    if(L_snr_sum > vo_L_mult(vad_thr, (512 * COMPLEN)))
+    {
+        st->vadreg = (Word16) (st->vadreg | 0x4000);
+    }
+    /* check if the input power (pow_sum) is lower than a threshold */
+    if(pow_sum < VAD_POW_LOW)
+    {
+        low_power_flag = 1;
+    } else
+    {
+        low_power_flag = 0;
+    }
+    /* Update background noise estimates */
+    noise_estimate_update(st, level);
 
-	/* Calculate values for hang_len and burst_len based on vad_thr */
-	hang_len = add1(vo_mult(HANG_SLOPE, (vad_thr - HANG_P1)), HANG_HIGH);
-	if(hang_len < HANG_LOW)
-	{
-		hang_len = HANG_LOW;
-	}
-	burst_len = add1(vo_mult(BURST_SLOPE, (vad_thr - BURST_P1)), BURST_HIGH);
+    /* Calculate values for hang_len and burst_len based on vad_thr */
+    hang_len = add1(vo_mult(HANG_SLOPE, (vad_thr - HANG_P1)), HANG_HIGH);
+    if(hang_len < HANG_LOW)
+    {
+        hang_len = HANG_LOW;
+    }
+    burst_len = add1(vo_mult(BURST_SLOPE, (vad_thr - BURST_P1)), BURST_HIGH);
 
-	return (hangover_addition(st, low_power_flag, hang_len, burst_len));
+    return (hangover_addition(st, low_power_flag, hang_len, burst_len));
 }
 
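Two details of vad_decision() are worth spelling out. First, the per-band SNR
is computed with the usual fixed-point normalize/divide/renormalize dance
(norm_s, div_s, shl) and accumulated as a sum of squares via L_mac. Second,
recent decisions live in st->vadreg, a shift register: each frame the register
shifts right one bit and, if the SNR sum clears vad_thr, bit 0x4000 is set, so
a mask like 0x7800 answers "any speech in the last four frames?" with no extra
state. A sketch of that bookkeeping:

/* Sketch of the vadreg shift register; mirrors the 0x4000/0x7800 usage. */
#include <stdint.h>

static uint16_t vadreg_push(uint16_t reg, int speech_this_frame)
{
    reg >>= 1;                     /* age every stored decision by one frame */
    if (speech_this_frame)
        reg |= 0x4000;             /* newest decision occupies bit 14 */
    return reg;
}

static int speech_in_last_four(uint16_t reg)
{
    return (reg & 0x7800) != 0;    /* bits 14..11 = four most recent frames */
}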
 /******************************************************************************
@@ -566,54 +566,54 @@
 *******************************************************************************/
 
 static void Estimate_Speech(
-		VadVars * st,                         /* i/o : State structure    */
-		Word16 in_level                       /* level of the input frame */
-		)
+        VadVars * st,                         /* i/o : State structure    */
+        Word16 in_level                       /* level of the input frame */
+        )
 {
-	Word16 alpha;
+    Word16 alpha;
 
-	/* if the required activity count cannot be achieved, reset counters */
-	if((st->sp_est_cnt - st->sp_max_cnt) > (SP_EST_COUNT - SP_ACTIVITY_COUNT))
-	{
-		st->sp_est_cnt = 0;
-		st->sp_max = 0;
-		st->sp_max_cnt = 0;
-	}
-	st->sp_est_cnt += 1;
+    /* if the required activity count cannot be achieved, reset counters */
+    if((st->sp_est_cnt - st->sp_max_cnt) > (SP_EST_COUNT - SP_ACTIVITY_COUNT))
+    {
+        st->sp_est_cnt = 0;
+        st->sp_max = 0;
+        st->sp_max_cnt = 0;
+    }
+    st->sp_est_cnt += 1;
 
-	if (((st->vadreg & 0x4000)||(in_level > st->speech_level)) && (in_level > MIN_SPEECH_LEVEL1))
-	{
-		/* update sp_max */
-		if(in_level > st->sp_max)
-		{
-			st->sp_max = in_level;
-		}
-		st->sp_max_cnt += 1;
+    if (((st->vadreg & 0x4000)||(in_level > st->speech_level)) && (in_level > MIN_SPEECH_LEVEL1))
+    {
+        /* update sp_max */
+        if(in_level > st->sp_max)
+        {
+            st->sp_max = in_level;
+        }
+        st->sp_max_cnt += 1;
 
-		if(st->sp_max_cnt >= SP_ACTIVITY_COUNT)
-		{
-			Word16 tmp;
-			/* update speech estimate */
-			tmp = (st->sp_max >> 1);      /* scale to get "average" speech level */
+        if(st->sp_max_cnt >= SP_ACTIVITY_COUNT)
+        {
+            Word16 tmp;
+            /* update speech estimate */
+            tmp = (st->sp_max >> 1);      /* scale to get "average" speech level */
 
-			/* select update speed */
-			if(tmp > st->speech_level)
-			{
-				alpha = ALPHA_SP_UP;
-			} else
-			{
-				alpha = ALPHA_SP_DOWN;
-			}
-			if(tmp > MIN_SPEECH_LEVEL2)
-			{
-				st->speech_level = add1(st->speech_level, vo_mult_r(alpha, vo_sub(tmp, st->speech_level)));
-			}
-			/* clear all counters used for speech estimation */
-			st->sp_max = 0;
-			st->sp_max_cnt = 0;
-			st->sp_est_cnt = 0;
-		}
-	}
+            /* select update speed */
+            if(tmp > st->speech_level)
+            {
+                alpha = ALPHA_SP_UP;
+            } else
+            {
+                alpha = ALPHA_SP_DOWN;
+            }
+            if(tmp > MIN_SPEECH_LEVEL2)
+            {
+                st->speech_level = add1(st->speech_level, vo_mult_r(alpha, vo_sub(tmp, st->speech_level)));
+            }
+            /* clear all counters used for speech estimation */
+            st->sp_max = 0;
+            st->sp_max_cnt = 0;
+            st->sp_est_cnt = 0;
+        }
+    }
 }
 
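Estimate_Speech() is a windowed peak tracker: it waits for SP_ACTIVITY_COUNT
sufficiently loud frames within a window of SP_EST_COUNT frames, then moves
speech_level toward half of the observed maximum with separate up/down
smoothing constants. A rough floating-point restatement (the inlined numbers
are placeholders, not the codec's values, and the MIN_SPEECH_LEVEL2 guard is
omitted):

/* Float sketch of the speech-level estimator; thresholds illustrative. */
typedef struct {
    int   est_cnt, max_cnt;
    float sp_max, speech_level;
} SpeechEst;

static void speech_level_step(SpeechEst *s, float in_level, int vad,
                              float alpha_up, float alpha_down)
{
    if (s->est_cnt - s->max_cnt > 60) {     /* SP_EST_COUNT - SP_ACTIVITY_COUNT */
        s->est_cnt = s->max_cnt = 0;        /* window expired: start over */
        s->sp_max = 0.0f;
    }
    s->est_cnt++;

    if ((vad || in_level > s->speech_level) && in_level > 100.0f) {
        if (in_level > s->sp_max)
            s->sp_max = in_level;
        if (++s->max_cnt >= 20) {           /* enough loud frames observed */
            float tmp = 0.5f * s->sp_max;   /* scale peak to "average" level */
            float a = (tmp > s->speech_level) ? alpha_up : alpha_down;
            s->speech_level += a * (tmp - s->speech_level);
            s->sp_max = 0.0f;
            s->max_cnt = s->est_cnt = 0;
        }
    }
}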
 /******************************************************************************
@@ -624,30 +624,30 @@
 *******************************************************************************/
 
 Word16 wb_vad_init(                        /* return: non-zero with error, zero for ok. */
-		VadVars ** state,                     /* i/o : State structure    */
-		VO_MEM_OPERATOR *pMemOP
-		)
+        VadVars ** state,                     /* i/o : State structure    */
+        VO_MEM_OPERATOR *pMemOP
+        )
 {
-	VadVars *s;
+    VadVars *s;
 
-	if (state == (VadVars **) NULL)
-	{
-		fprintf(stderr, "vad_init: invalid parameter\n");
-		return -1;
-	}
-	*state = NULL;
+    if (state == (VadVars **) NULL)
+    {
+        fprintf(stderr, "vad_init: invalid parameter\n");
+        return -1;
+    }
+    *state = NULL;
 
-	/* allocate memory */
-	if ((s = (VadVars *) mem_malloc(pMemOP, sizeof(VadVars), 32, VO_INDEX_ENC_AMRWB)) == NULL)
-	{
-		fprintf(stderr, "vad_init: can not malloc state structure\n");
-		return -1;
-	}
-	wb_vad_reset(s);
+    /* allocate memory */
+    if ((s = (VadVars *) mem_malloc(pMemOP, sizeof(VadVars), 32, VO_INDEX_ENC_AMRWB)) == NULL)
+    {
+        fprintf(stderr, "vad_init: can not malloc state structure\n");
+        return -1;
+    }
+    wb_vad_reset(s);
 
-	*state = s;
+    *state = s;
 
-	return 0;
+    return 0;
 }
 
 /******************************************************************************
@@ -658,51 +658,51 @@
 *******************************************************************************/
 
 Word16 wb_vad_reset(                       /* return: non-zero with error, zero for ok. */
-		VadVars * state                       /* i/o : State structure    */
-		)
+        VadVars * state                       /* i/o : State structure    */
+        )
 {
-	Word32 i, j;
+    Word32 i, j;
 
-	if (state == (VadVars *) NULL)
-	{
-		fprintf(stderr, "vad_reset: invalid parameter\n");
-		return -1;
-	}
-	state->tone_flag = 0;
-	state->vadreg = 0;
-	state->hang_count = 0;
-	state->burst_count = 0;
-	state->hang_count = 0;
+    if (state == (VadVars *) NULL)
+    {
+        fprintf(stderr, "vad_reset: invalid parameter\n");
+        return -1;
+    }
+    state->tone_flag = 0;
+    state->vadreg = 0;
+    state->hang_count = 0;
+    state->burst_count = 0;
+    state->hang_count = 0;
 
-	/* initialize memory used by the filter bank */
-	for (i = 0; i < F_5TH_CNT; i++)
-	{
-		for (j = 0; j < 2; j++)
-		{
-			state->a_data5[i][j] = 0;
-		}
-	}
+    /* initialize memory used by the filter bank */
+    for (i = 0; i < F_5TH_CNT; i++)
+    {
+        for (j = 0; j < 2; j++)
+        {
+            state->a_data5[i][j] = 0;
+        }
+    }
 
-	for (i = 0; i < F_3TH_CNT; i++)
-	{
-		state->a_data3[i] = 0;
-	}
+    for (i = 0; i < F_3TH_CNT; i++)
+    {
+        state->a_data3[i] = 0;
+    }
 
-	/* initialize the rest of the memory */
-	for (i = 0; i < COMPLEN; i++)
-	{
-		state->bckr_est[i] = NOISE_INIT;
-		state->old_level[i] = NOISE_INIT;
-		state->ave_level[i] = NOISE_INIT;
-		state->sub_level[i] = 0;
-	}
+    /* initialize the rest of the memory */
+    for (i = 0; i < COMPLEN; i++)
+    {
+        state->bckr_est[i] = NOISE_INIT;
+        state->old_level[i] = NOISE_INIT;
+        state->ave_level[i] = NOISE_INIT;
+        state->sub_level[i] = 0;
+    }
 
-	state->sp_est_cnt = 0;
-	state->sp_max = 0;
-	state->sp_max_cnt = 0;
-	state->speech_level = SPEECH_LEVEL_INIT;
-	state->prev_pow_sum = 0;
-	return 0;
+    state->sp_est_cnt = 0;
+    state->sp_max = 0;
+    state->sp_max_cnt = 0;
+    state->speech_level = SPEECH_LEVEL_INIT;
+    state->prev_pow_sum = 0;
+    return 0;
 }
 
 /******************************************************************************
@@ -713,16 +713,16 @@
 *******************************************************************************/
 
 void wb_vad_exit(
-		VadVars ** state,                      /* i/o : State structure    */
-		VO_MEM_OPERATOR *pMemOP
-		)
+        VadVars ** state,                      /* i/o : State structure    */
+        VO_MEM_OPERATOR *pMemOP
+        )
 {
-	if (state == NULL || *state == NULL)
-		return;
-	/* deallocate memory */
-	mem_free(pMemOP, *state, VO_INDEX_ENC_AMRWB);
-	*state = NULL;
-	return;
+    if (state == NULL || *state == NULL)
+        return;
+    /* deallocate memory */
+    mem_free(pMemOP, *state, VO_INDEX_ENC_AMRWB);
+    *state = NULL;
+    return;
 }
 
 /******************************************************************************
@@ -735,18 +735,18 @@
 *******************************************************************************/
 
 void wb_vad_tone_detection(
-		VadVars * st,                         /* i/o : State struct            */
-		Word16 p_gain                         /* pitch gain      */
-		)
+        VadVars * st,                         /* i/o : State struct            */
+        Word16 p_gain                         /* pitch gain      */
+        )
 {
-	/* update tone flag */
-	st->tone_flag = (st->tone_flag >> 1);
+    /* update tone flag */
+    st->tone_flag = (st->tone_flag >> 1);
 
-	/* if (pitch_gain > TONE_THR) set tone flag */
-	if (p_gain > TONE_THR)
-	{
-		st->tone_flag = (Word16) (st->tone_flag | 0x4000);
-	}
+    /* if (pitch_gain > TONE_THR) set tone flag */
+    if (p_gain > TONE_THR)
+    {
+        st->tone_flag = (Word16) (st->tone_flag | 0x4000);
+    }
 }
 
 /******************************************************************************
@@ -757,50 +757,50 @@
 *******************************************************************************/
 
 Word16 wb_vad(                                /* Return value : VAD Decision, 1 = speech, 0 = noise */
-		VadVars * st,                         /* i/o : State structure                 */
-		Word16 in_buf[]                       /* i   : samples of the input frame   */
-	     )
+        VadVars * st,                         /* i/o : State structure                 */
+        Word16 in_buf[]                       /* i   : samples of the input frame   */
+        )
 {
-	Word16 level[COMPLEN];
-	Word32 i;
-	Word16 VAD_flag, temp;
-	Word32 L_temp, pow_sum;
+    Word16 level[COMPLEN];
+    Word32 i;
+    Word16 VAD_flag, temp;
+    Word32 L_temp, pow_sum;
 
-	/* Calculate power of the input frame. */
-	L_temp = 0L;
-	for (i = 0; i < FRAME_LEN; i++)
-	{
-		L_temp = L_mac(L_temp, in_buf[i], in_buf[i]);
-	}
+    /* Calculate power of the input frame. */
+    L_temp = 0L;
+    for (i = 0; i < FRAME_LEN; i++)
+    {
+        L_temp = L_mac(L_temp, in_buf[i], in_buf[i]);
+    }
 
-	/* pow_sum = power of current frame and previous frame */
-	pow_sum = L_add(L_temp, st->prev_pow_sum);
+    /* pow_sum = power of current frame and previous frame */
+    pow_sum = L_add(L_temp, st->prev_pow_sum);
 
-	/* save power of current frame for next call */
-	st->prev_pow_sum = L_temp;
+    /* save power of current frame for next call */
+    st->prev_pow_sum = L_temp;
 
-	/* If input power is very low, clear tone flag */
-	if (pow_sum < POW_TONE_THR)
-	{
-		st->tone_flag = (Word16) (st->tone_flag & 0x1fff);
-	}
-	/* Run the filter bank and calculate signal levels at each band */
-	filter_bank(st, in_buf, level);
+    /* If input power is very low, clear tone flag */
+    if (pow_sum < POW_TONE_THR)
+    {
+        st->tone_flag = (Word16) (st->tone_flag & 0x1fff);
+    }
+    /* Run the filter bank and calculate signal levels at each band */
+    filter_bank(st, in_buf, level);
 
-	/* compute VAD decision */
-	VAD_flag = vad_decision(st, level, pow_sum);
+    /* compute VAD decision */
+    VAD_flag = vad_decision(st, level, pow_sum);
 
-	/* Calculate input level */
-	L_temp = 0;
-	for (i = 1; i < COMPLEN; i++)          /* ignore lowest band */
-	{
-		L_temp = vo_L_add(L_temp, level[i]);
-	}
+    /* Calculate input level */
+    L_temp = 0;
+    for (i = 1; i < COMPLEN; i++)          /* ignore lowest band */
+    {
+        L_temp = vo_L_add(L_temp, level[i]);
+    }
 
-	temp = extract_h(L_temp << 12);
+    temp = extract_h(L_temp << 12);
 
-	Estimate_Speech(st, temp);             /* Estimate speech level */
-	return (VAD_flag);
+    Estimate_Speech(st, temp);             /* Estimate speech level */
+    return (VAD_flag);
 }
 
 
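Taken together, wb_vad_init/reset/exit plus wb_vad and wb_vad_tone_detection
are the whole public surface of this file. A typical caller drives them per
frame like this (get_next_frame() and pitch_gain_of() are hypothetical
stand-ins, and the VO_MEM_OPERATOR setup is elided):

/* Illustrative calling sequence; error handling abbreviated. */
extern int    get_next_frame(Word16 frame[]);   /* hypothetical input source */
extern Word16 pitch_gain_of(const Word16 *f);   /* hypothetical pitch gain   */

static int run_vad(VO_MEM_OPERATOR *pMemOP)
{
    VadVars *vad = NULL;
    Word16 frame[FRAME_LEN];

    if (wb_vad_init(&vad, pMemOP) != 0)
        return -1;                              /* allocation failed */

    while (get_next_frame(frame)) {             /* one frame per iteration */
        wb_vad_tone_detection(vad, pitch_gain_of(frame));
        Word16 flag = wb_vad(vad, frame);       /* 1 = speech, 0 = noise */
        (void)flag;                             /* consume the decision */
    }

    wb_vad_exit(&vad, pMemOP);                  /* frees state, NULLs handle */
    return 0;
}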
diff --git a/media/libstagefright/codecs/amrwbenc/src/weight_a.c b/media/libstagefright/codecs/amrwbenc/src/weight_a.c
index a02b48d..23b774e 100644
--- a/media/libstagefright/codecs/amrwbenc/src/weight_a.c
+++ b/media/libstagefright/codecs/amrwbenc/src/weight_a.c
@@ -18,7 +18,7 @@
 *       File: weight_a.c                                               *
 *                                                                      *
 *       Description:Weighting of LPC coefficients                      *
-*	               ap[i] = a[i] * (gamma ** i)                     *
+*                  ap[i] = a[i] * (gamma ** i)                         *
 *                                                                      *
 ************************************************************************/
 
@@ -26,22 +26,22 @@
 #include "basic_op.h"
 
 void Weight_a(
-		Word16 a[],                           /* (i) Q12 : a[m+1]  LPC coefficients             */
-		Word16 ap[],                          /* (o) Q12 : Spectral expanded LPC coefficients   */
-		Word16 gamma,                         /* (i) Q15 : Spectral expansion factor.           */
-		Word16 m                              /* (i)     : LPC order.                           */
-	     )
+        Word16 a[],                           /* (i) Q12 : a[m+1]  LPC coefficients             */
+        Word16 ap[],                          /* (o) Q12 : Spectral expanded LPC coefficients   */
+        Word16 gamma,                         /* (i) Q15 : Spectral expansion factor.           */
+        Word16 m                              /* (i)     : LPC order.                           */
+        )
 {
-	Word32 num = m - 1, fac;
-	*ap++ = *a++;
-	fac = gamma;
-	do{
-		*ap++ =(Word16)(((vo_L_mult((*a++), fac)) + 0x8000) >> 16);
-		fac = (vo_L_mult(fac, gamma) + 0x8000) >> 16;
-	}while(--num != 0);
+    Word32 num = m - 1, fac;
+    *ap++ = *a++;
+    fac = gamma;
+    do{
+        *ap++ =(Word16)(((vo_L_mult((*a++), fac)) + 0x8000) >> 16);
+        fac = (vo_L_mult(fac, gamma) + 0x8000) >> 16;
+    }while(--num != 0);
 
-	*ap++ = (Word16)(((vo_L_mult((*a++), fac)) + 0x8000) >> 16);
-	return;
+    *ap++ = (Word16)(((vo_L_mult((*a++), fac)) + 0x8000) >> 16);
+    return;
 }
 
 
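Weight_a() evaluates ap[i] = a[i] * gamma^i without ever recomputing a power:
fac carries the running gamma^i across the loop, and every Q15 multiply is
rounded with the (+0x8000) >> 16 idiom. The floating-point equivalent makes
the recurrence obvious:

/* Float equivalent of Weight_a(); illustrative only. */
static void weight_a_float(const float a[], float ap[], float gamma, int m)
{
    float fac = 1.0f;
    for (int i = 0; i <= m; i++) {
        ap[i] = a[i] * fac;     /* ap[i] = a[i] * gamma^i */
        fac *= gamma;           /* carry the running power of gamma */
    }
}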
diff --git a/media/libstagefright/codecs/avc/common/Android.mk b/media/libstagefright/codecs/avc/common/Android.mk
index 844ef0a..ed0f8ca 100644
--- a/media/libstagefright/codecs/avc/common/Android.mk
+++ b/media/libstagefright/codecs/avc/common/Android.mk
@@ -17,5 +17,7 @@
  	$(LOCAL_PATH)/include
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
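This is the first of many build changes in this patch that opt a codec module
into clang and its integer sanitizers. For context, the class of bug that
LOCAL_SANITIZE := signed-integer-overflow traps at runtime is ordinary C
undefined behavior like the following (illustrative, not from this codec):

/* With -fsanitize=signed-integer-overflow this is caught at runtime instead
 * of silently wrapping; signed overflow is undefined behavior in C. */
#include <limits.h>

int scale_bitrate(int bitrate_kbps)
{
    return bitrate_kbps * 1024;  /* overflows once bitrate_kbps > INT_MAX/1024 */
}
/* e.g. scale_bitrate(INT_MAX / 2) would be flagged by the sanitizer. */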
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index 2ceebc8..8ff2f35 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -31,6 +31,8 @@
     -DOSCL_IMPORT_REF= -D"OSCL_UNUSED_ARG(x)=(void)(x)" -DOSCL_EXPORT_REF=
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
 
@@ -72,5 +74,37 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+        test/h264_enc_test.cpp
+
+LOCAL_C_INCLUDES := \
+        $(LOCAL_PATH)/src \
+        $(LOCAL_PATH)/include \
+        $(LOCAL_PATH)/../common/include \
+        $(LOCAL_PATH)/../common
+
+LOCAL_CFLAGS := \
+    -DOSCL_IMPORT_REF= -DOSCL_UNUSED_ARG= -DOSCL_EXPORT_REF=
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
+
+LOCAL_STATIC_LIBRARIES := \
+        libstagefright_avcenc
+
+LOCAL_SHARED_LIBRARIES := \
+        libstagefright_avc_common
+
+LOCAL_MODULE := libstagefright_h264enc_test
+
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp b/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
index 0b8d9e2..d0bbee2 100644
--- a/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
+++ b/media/libstagefright/codecs/avc/enc/src/findhalfpel.cpp
@@ -23,19 +23,6 @@
 
 #define PREF_16_VEC 129     /* 1MV bias versus 4MVs*/
 
-const static int distance_tab[9][9] =   /* [hp_guess][k] */
-{
-    {0, 1, 1, 1, 1, 1, 1, 1, 1},
-    {1, 0, 1, 2, 3, 4, 3, 2, 1},
-    {1, 0, 0, 0, 1, 2, 3, 2, 1},
-    {1, 2, 1, 0, 1, 2, 3, 4, 3},
-    {1, 2, 1, 0, 0, 0, 1, 2, 3},
-    {1, 4, 3, 2, 1, 0, 1, 2, 3},
-    {1, 2, 3, 2, 1, 0, 0, 0, 1},
-    {1, 2, 3, 4, 3, 2, 1, 0, 1},
-    {1, 0, 1, 2, 3, 2, 1, 0, 0}
-};
-
 #define CLIP_RESULT(x)      if((uint)x > 0xFF){ \
                  x = 0xFF & (~(x>>31));}
 
diff --git a/media/libstagefright/codecs/avc/enc/test/h264_enc_test.cpp b/media/libstagefright/codecs/avc/enc/test/h264_enc_test.cpp
new file mode 100644
index 0000000..7a782a8
--- /dev/null
+++ b/media/libstagefright/codecs/avc/enc/test/h264_enc_test.cpp
@@ -0,0 +1,357 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <assert.h>
+#include <stdlib.h>
+
+#include "avcenc_api.h"
+#include "avcenc_int.h"
+
+// Constants.
+enum {
+    kMaxWidth         = 720,
+    kMaxHeight        = 480,
+    kMaxFrameRate     = 30,
+    kMaxBitrate       = 2048, // in kbps.
+    kInputBufferSize  = (kMaxWidth * kMaxHeight * 3) / 2, // For YUV 420 format.
+    kOutputBufferSize = kInputBufferSize,
+    kMaxDpbBuffers    = 17,
+    kIDRFrameRefreshIntervalInSec = 1,
+};
+
+
+static void *MallocCb(void * /*userData*/, int32_t size, int32_t /*attrs*/) {
+    void *ptr = calloc(size, 1);
+    return ptr;
+}
+
+static void FreeCb(void * /*userData*/, void *ptr) {
+    free(ptr);
+}
+
+static int32_t DpbAllocCb(void * /*userData*/,
+        unsigned int sizeInMbs, unsigned int numBuffers) {
+
+    size_t frameSize = (sizeInMbs << 7) * 3;
+    if (numBuffers < kMaxDpbBuffers && frameSize <= kInputBufferSize) {
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+static int32_t BindFrameCb(void *userData, int32_t index, uint8_t **yuv) {
+    assert(index < kMaxDpbBuffers);
+    uint8_t** dpbBuffer = static_cast<uint8_t**>(userData);
+    *yuv = dpbBuffer[index];
+    return 1;
+}
+
+static void UnbindFrameCb(void * /*userData*/, int32_t /*index*/) {
+}
+
+int main(int argc, char *argv[]) {
+
+    if (argc < 7) {
+        fprintf(stderr, "Usage %s <input yuv> <output file> <width> <height>"
+                        " <frame rate> <bitrate in kbps>\n", argv[0]);
+        fprintf(stderr, "Max width %d\n", kMaxWidth);
+        fprintf(stderr, "Max height %d\n", kMaxHeight);
+        fprintf(stderr, "Max framerate %d\n", kMaxFrameRate);
+        fprintf(stderr, "Max bitrate %d kbps\n", kMaxBitrate);
+        return EXIT_FAILURE;
+    }
+
+    // Read height and width.
+    int32_t width;
+    int32_t height;
+    width = atoi(argv[3]);
+    height = atoi(argv[4]);
+    if (width > kMaxWidth || height > kMaxHeight || width <= 0 || height <= 0) {
+        fprintf(stderr, "Unsupported dimensions %dx%d\n", width, height);
+        return EXIT_FAILURE;
+    }
+
+    if (width % 16 != 0 || height % 16 != 0) {
+        fprintf(stderr, "Video frame size %dx%d must be a multiple of 16\n",
+            width, height);
+        return EXIT_FAILURE;
+    }
+
+    // Read frame rate.
+    int32_t frameRate;
+    frameRate = atoi(argv[5]);
+    if (frameRate > kMaxFrameRate || frameRate <= 0) {
+        fprintf(stderr, "Unsupported frame rate %d\n", frameRate);
+        return EXIT_FAILURE;
+    }
+
+    // Read bit rate.
+    int32_t bitrate;
+    bitrate = atoi(argv[6]);
+    if (bitrate > kMaxBitrate || bitrate <= 0) {
+        fprintf(stderr, "Unsupported bitrate %d\n", bitrate);
+        return EXIT_FAILURE;
+    }
+    bitrate *= 1024; // kbps to bps.
+
+    // Open the input file.
+    FILE *fpInput = fopen(argv[1], "rb");
+    if (!fpInput) {
+        fprintf(stderr, "Could not open %s\n", argv[1]);
+        return EXIT_FAILURE;
+    }
+
+    // Open the output file.
+    FILE *fpOutput = fopen(argv[2], "wb");
+    if (!fpOutput) {
+        fprintf(stderr, "Could not open %s\n", argv[2]);
+        fclose(fpInput);
+        return EXIT_FAILURE;
+    }
+
+    // Allocate input buffer.
+    uint8_t *inputBuf = (uint8_t *)malloc(kInputBufferSize);
+    assert(inputBuf != NULL);
+
+    // Allocate output buffer.
+    uint8_t *outputBuf = (uint8_t *)malloc(kOutputBufferSize);
+    assert(outputBuf != NULL);
+
+    // Allocate dpb buffers.
+    uint8_t * dpbBuffers[kMaxDpbBuffers];
+    for (int i = 0; i < kMaxDpbBuffers; ++i) {
+        dpbBuffers[i] = (uint8_t *)malloc(kInputBufferSize);
+        assert(dpbBuffers[i] != NULL);
+    }
+
+    // Initialize the encoder parameters.
+    tagAVCEncParam encParams;
+    memset(&encParams, 0, sizeof(tagAVCEncParam));
+    encParams.rate_control = AVC_ON;
+    encParams.initQP = 0;
+    encParams.init_CBP_removal_delay = 1600;
+
+    encParams.intramb_refresh = 0;
+    encParams.auto_scd = AVC_ON;
+    encParams.out_of_band_param_set = AVC_ON;
+    encParams.poc_type = 2;
+    encParams.log2_max_poc_lsb_minus_4 = 12;
+    encParams.delta_poc_zero_flag = 0;
+    encParams.offset_poc_non_ref = 0;
+    encParams.offset_top_bottom = 0;
+    encParams.num_ref_in_cycle = 0;
+    encParams.offset_poc_ref = NULL;
+
+    encParams.num_ref_frame = 1;
+    encParams.num_slice_group = 1;
+    encParams.fmo_type = 0;
+
+    encParams.db_filter = AVC_ON;
+    encParams.disable_db_idc = 0;
+
+    encParams.alpha_offset = 0;
+    encParams.beta_offset = 0;
+    encParams.constrained_intra_pred = AVC_OFF;
+
+    encParams.data_par = AVC_OFF;
+    encParams.fullsearch = AVC_OFF;
+    encParams.search_range = 16;
+    encParams.sub_pel = AVC_OFF;
+    encParams.submb_pred = AVC_OFF;
+    encParams.rdopt_mode = AVC_OFF;
+    encParams.bidir_pred = AVC_OFF;
+
+    encParams.use_overrun_buffer = AVC_OFF;
+
+    encParams.width = width;
+    encParams.height = height;
+    encParams.bitrate = bitrate;
+    encParams.frame_rate = 1000 * frameRate;  // In frames/ms.
+    encParams.CPB_size = (uint32_t) (bitrate >> 1);
+
+    int32_t  IDRFrameRefreshIntervalInSec = kIDRFrameRefreshIntervalInSec;
+    if (IDRFrameRefreshIntervalInSec == 0) {
+        encParams.idr_period = 1;  // All I frames.
+    } else {
+        encParams.idr_period = (IDRFrameRefreshIntervalInSec * frameRate);
+    }
+
+    int32_t nMacroBlocks = ((((width + 15) >> 4) << 4) *
+            (((height + 15) >> 4) << 4)) >> 8;
+    uint32_t *sliceGroup = (uint32_t *) malloc(sizeof(uint32_t) * nMacroBlocks);
+    assert(sliceGroup != NULL);
+    for (int i = 0, idx = 0; i < nMacroBlocks; ++i) {
+        sliceGroup[i] = idx++;
+        if (idx >= encParams.num_slice_group) {
+            idx = 0;
+        }
+    }
+    encParams.slice_group = sliceGroup;
+    encParams.profile = AVC_BASELINE;
+    encParams.level = AVC_LEVEL2;
+
+    // Initialize the handle.
+    tagAVCHandle handle;
+    memset(&handle, 0, sizeof(tagAVCHandle));
+    handle.AVCObject = NULL;
+    handle.userData = dpbBuffers;
+    handle.CBAVC_DPBAlloc = DpbAllocCb;
+    handle.CBAVC_FrameBind = BindFrameCb;
+    handle.CBAVC_FrameUnbind = UnbindFrameCb;
+    handle.CBAVC_Malloc = MallocCb;
+    handle.CBAVC_Free = FreeCb;
+
+    // Initialize the encoder.
+    AVCEnc_Status status;
+    status = PVAVCEncInitialize(&handle, &encParams, NULL, NULL);
+    if (status != AVCENC_SUCCESS) {
+        fprintf(stderr, "Failed to initialize the encoder\n");
+
+        // Release resources.
+        fclose(fpInput);
+        fclose(fpOutput);
+        free(sliceGroup);
+        free(inputBuf);
+        free(outputBuf);
+        for (int i = 0; i < kMaxDpbBuffers; ++i) {
+            free(dpbBuffers[i]);
+        }
+        return EXIT_FAILURE;
+    }
+
+    // Encode Sequence Parameter Set.
+    uint32_t dataLength = kOutputBufferSize;
+    int32_t type;
+    status = PVAVCEncodeNAL(&handle, outputBuf, &dataLength, &type);
+    assert(type == AVC_NALTYPE_SPS);
+    fwrite("\x00\x00\x00\x01", 1, 4, fpOutput); // Start Code.
+    fwrite(outputBuf, 1, dataLength, fpOutput); // SPS.
+
+    // Encode Picture Parameter Set.
+    dataLength = kOutputBufferSize;
+    status = PVAVCEncodeNAL(&handle, outputBuf, &dataLength, &type);
+    assert(type == AVC_NALTYPE_PPS);
+    fwrite("\x00\x00\x00\x01", 1, 4, fpOutput); // Start Code.
+    fwrite(outputBuf, 1, dataLength, fpOutput); // PPS.
+
+    // Core loop.
+    int32_t retVal = EXIT_SUCCESS;
+    int32_t frameSize = (width * height * 3) / 2;
+    int32_t numInputFrames = 0;
+    int32_t numNalEncoded = 0;
+    bool readyForNextFrame = true;
+
+    while (1) {
+        if (readyForNextFrame == true) {
+            // Read the input frame.
+            int32_t bytesRead;
+            bytesRead = fread(inputBuf, 1, frameSize, fpInput);
+            if (bytesRead != frameSize) {
+                break; // End of file.
+            }
+
+            // Set the input frame.
+            AVCFrameIO vin;
+            memset(&vin, 0, sizeof(vin));
+            vin.height = ((height + 15) >> 4) << 4;
+            vin.pitch  = ((width  + 15) >> 4) << 4;
+            vin.coding_timestamp = (numInputFrames * 1000) / frameRate;  // in ms
+            vin.YCbCr[0] = inputBuf;
+            vin.YCbCr[1] = vin.YCbCr[0] + vin.height * vin.pitch;
+            vin.YCbCr[2] = vin.YCbCr[1] + ((vin.height * vin.pitch) >> 2);
+            vin.disp_order = numInputFrames;
+
+            status = PVAVCEncSetInput(&handle, &vin);
+            if (status == AVCENC_SUCCESS || status == AVCENC_NEW_IDR) {
+                readyForNextFrame = false;
+                ++numInputFrames;
+            } else if (status < AVCENC_SUCCESS) {
+                fprintf(stderr, "Error %d while setting input frame\n", status);
+                retVal = EXIT_FAILURE;
+                break;
+            } else {
+                fprintf(stderr, "Frame drop\n");
+                readyForNextFrame = true;
+                ++numInputFrames;
+                continue;
+            }
+        }
+
+        // Encode the input frame.
+        dataLength = kOutputBufferSize;
+        status = PVAVCEncodeNAL(&handle, outputBuf, &dataLength, &type);
+        if (status == AVCENC_SUCCESS) {
+            PVAVCEncGetOverrunBuffer(&handle);
+        } else if (status == AVCENC_PICTURE_READY) {
+            PVAVCEncGetOverrunBuffer(&handle);
+            readyForNextFrame = true;
+            AVCFrameIO recon;
+            if (PVAVCEncGetRecon(&handle, &recon) == AVCENC_SUCCESS) {
+                PVAVCEncReleaseRecon(&handle, &recon);
+            }
+        } else {
+            dataLength = 0;
+            readyForNextFrame = true;
+        }
+
+        if (status < AVCENC_SUCCESS) {
+            fprintf(stderr, "Error %d while encoding frame\n", status);
+            retVal = EXIT_FAILURE;
+            break;
+        }
+
+        numNalEncoded++;
+
+        // Write the output.
+        if (dataLength > 0) {
+            fwrite("\x00\x00\x00\x01", 1, 4, fpOutput); // Start Code.
+            fwrite(outputBuf, 1, dataLength, fpOutput); // NAL.
+            printf("NAL %d of size %d written\n", numNalEncoded, dataLength + 4);
+        }
+    }
+
+    // Close input and output file.
+    fclose(fpInput);
+    fclose(fpOutput);
+
+    // Free allocated memory.
+    free(sliceGroup);
+    free(inputBuf);
+    free(outputBuf);
+    for (int i = 0; i < kMaxDpbBuffers; ++i) {
+        free(dpbBuffers[i]);
+    }
+
+    // Close encoder instance.
+    PVAVCCleanUpEncoder(&handle);
+
+    return retVal;
+}
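One piece of arithmetic in the test above deserves a note: DpbAllocCb() checks
(sizeInMbs << 7) * 3 against kInputBufferSize. A 16x16 macroblock holds 256
luma bytes, and YUV 4:2:0 adds half that again in chroma, so a frame needs
sizeInMbs * 256 * 3/2 = sizeInMbs * 384 = (sizeInMbs << 7) * 3 bytes. A quick
self-check of the identity:

/* Sanity check of the DPB frame-size identity; illustrative. */
#include <assert.h>
#include <stddef.h>

static size_t yuv420_frame_bytes(size_t sizeInMbs)
{
    size_t luma   = sizeInMbs * 16 * 16;  /* 256 luma bytes per macroblock */
    size_t chroma = luma / 2;             /* 4:2:0: half as much chroma */
    return luma + chroma;                 /* == sizeInMbs * 384 */
}

static void check_dpb_math(void)
{
    for (size_t mbs = 1; mbs <= 1350; mbs++)      /* 1350 MBs == 720x480 */
        assert(yuv420_frame_bytes(mbs) == (mbs << 7) * 3);
}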
diff --git a/media/libstagefright/codecs/avcdec/Android.mk b/media/libstagefright/codecs/avcdec/Android.mk
index 902ab57..ef0dbfd 100644
--- a/media/libstagefright/codecs/avcdec/Android.mk
+++ b/media/libstagefright/codecs/avcdec/Android.mk
@@ -20,6 +20,9 @@
 LOCAL_SHARED_LIBRARIES  += libutils
 LOCAL_SHARED_LIBRARIES  += liblog
 
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
+
 LOCAL_LDFLAGS := -Wl,-Bsymbolic
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index 2130ccf..61b9bfd 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -416,7 +416,7 @@
     uint8_t *pBuf;
     if (outHeader) {
         if (outHeader->nAllocLen < sizeY + (sizeUV * 2)) {
-            android_errorWriteLog(0x534e4554, "27569635");
+            android_errorWriteLog(0x534e4554, "27833616");
             return false;
         }
         pBuf = outHeader->pBuffer;
@@ -443,7 +443,7 @@
         uint32_t bufferSize = displayStride * displayHeight * 3 / 2;
         mFlushOutBuffer = (uint8_t *)memalign(128, bufferSize);
         if (NULL == mFlushOutBuffer) {
-            ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize);
+            ALOGE("Could not allocate flushOutputBuffer of size %u", bufferSize);
             return;
         }
 
@@ -497,16 +497,6 @@
     List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
     List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
 
-    /* If input EOS is seen and decoder is not in flush mode,
-     * set the decoder in flush mode.
-     * There can be a case where EOS is sent along with last picture data
-     * In that case, only after decoding that input data, decoder has to be
-     * put in flush. This case is handled here  */
-
-    if (mReceivedEOS && !mIsInFlush) {
-        setFlushMode();
-    }
-
     while (!outQueue.empty()) {
         BufferInfo *inInfo;
         OMX_BUFFERHEADERTYPE *inHeader;
@@ -638,6 +628,8 @@
                 mChangingResolution = false;
                 resetDecoder();
                 resetPlugin();
+                mStride = outputBufferWidth();
+                setParams(mStride);
                 continue;
             }
 
@@ -672,7 +664,7 @@
                 outInfo = NULL;
                 notifyFillBufferDone(outHeader);
                 outHeader = NULL;
-            } else {
+            } else if (mIsInFlush) {
                 /* If in flush mode and no output is returned by the codec,
                  * then come out of flush mode */
                 mIsInFlush = false;
@@ -693,6 +685,16 @@
             }
         }
 
+        /* If input EOS is seen and decoder is not in flush mode,
+         * set the decoder in flush mode.
+         * There can be a case where EOS is sent along with last picture data
+         * In that case, only after decoding that input data, decoder has to be
+         * put in flush. This case is handled here  */
+
+        if (mReceivedEOS && !mIsInFlush) {
+            setFlushMode();
+        }
+
         if (inHeader != NULL) {
             inInfo->mOwnedByUs = false;
             inQueue.erase(inQueue.begin());
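The SoftAVCDec change above is pure reordering: the EOS-triggered
setFlushMode() used to run before the decode loop, so an input buffer carrying
both the last picture and the EOS flag could switch the decoder into flush
before that picture was decoded. Moving the check to the bottom of the loop
body guarantees the final access unit is decoded first. A minimal model of the
corrected ordering (decode_one() and flush() are hypothetical stand-ins for
the OMX plumbing):

/* Sketch of the reordered EOS handling. */
#include <stdbool.h>

extern bool decode_one(void);   /* hypothetical: one pass of the while loop */
extern void flush(void);        /* hypothetical: setFlushMode() */

static void drain(bool received_eos)
{
    bool in_flush = false;
    bool more = true;
    while (more) {
        more = decode_one();            /* may consume the final picture */
        /* Only after this input is decoded may EOS move us to flush, so an
         * EOS flag riding on the last picture can no longer preempt it. */
        if (received_eos && !in_flush) {
            flush();
            in_flush = true;
        }
    }
}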
diff --git a/media/libstagefright/codecs/avcenc/Android.mk b/media/libstagefright/codecs/avcenc/Android.mk
index 24a4db9..70e531b 100644
--- a/media/libstagefright/codecs/avcenc/Android.mk
+++ b/media/libstagefright/codecs/avcenc/Android.mk
@@ -23,6 +23,9 @@
 LOCAL_SHARED_LIBRARIES  += libutils
 LOCAL_SHARED_LIBRARIES  += liblog
 
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
+
 LOCAL_LDFLAGS := -Wl,-Bsymbolic
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index e4e8fd7..6ec8c41 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -28,6 +28,8 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
+#include <OMX_IndexExt.h>
+#include <OMX_VideoExt.h>
 #include <ui/Rect.h>
 
 #include "ih264_typedefs.h"
@@ -157,8 +159,7 @@
             kProfileLevels, NELEM(kProfileLevels),
             176 /* width */, 144 /* height */,
             callbacks, appData, component),
-      mBitrateUpdated(false),
-      mKeyFrameRequested(false),
+      mUpdateFlag(0),
       mIvVideoColorFormat(IV_YUV_420P),
       mAVCEncProfile(IV_PROFILE_BASE),
       mAVCEncLevel(41),
@@ -207,6 +208,7 @@
     mEnableAltRef = DEFAULT_ENABLE_ALT_REF;
     mEncSpeed = DEFAULT_ENC_SPEED;
     mIntra4x4 = DEFAULT_INTRA4x4;
+    mConstrainedIntraFlag = DEFAULT_CONSTRAINED_INTRA;
     mAIRMode = DEFAULT_AIR;
     mAIRRefreshPeriod = DEFAULT_AIR_REFRESH_PERIOD;
     mPSNREnable = DEFAULT_PSNR_ENABLE;
@@ -304,6 +306,7 @@
 
     s_ipe_params_ip.u4_enable_intra_4x4 = mIntra4x4;
     s_ipe_params_ip.u4_enc_speed_preset = mEncSpeed;
+    s_ipe_params_ip.u4_constrained_intra_pred = mConstrainedIntraFlag;
 
     s_ipe_params_ip.u4_timestamp_high = -1;
     s_ipe_params_ip.u4_timestamp_low = -1;
@@ -1017,6 +1020,7 @@
             }
 
             mIInterval = avcType->nPFrames + avcType->nBFrames;
+            mConstrainedIntraFlag = avcType->bconstIpred;
 
             if (OMX_VIDEO_AVCLoopFilterDisable == avcType->eLoopFilterMode)
                 mDisableDeblkLevel = 4;
@@ -1026,7 +1030,6 @@
                     || avcType->nRefIdx10ActiveMinus1 != 0
                     || avcType->nRefIdx11ActiveMinus1 != 0
                     || avcType->bWeightedPPrediction != OMX_FALSE
-                    || avcType->bconstIpred != OMX_FALSE
                     || avcType->bDirect8x8Inference != OMX_FALSE
                     || avcType->bDirectSpatialTemporal != OMX_FALSE
                     || avcType->nCabacInitIdc != 0) {
@@ -1045,9 +1048,35 @@
     }
 }
 
+OMX_ERRORTYPE SoftAVC::getConfig(
+        OMX_INDEXTYPE index, OMX_PTR _params) {
+    switch ((int)index) {
+        case OMX_IndexConfigAndroidIntraRefresh:
+        {
+            OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE *intraRefreshParams =
+                (OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE *)_params;
+
+            if (!isValidOMXParam(intraRefreshParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (intraRefreshParams->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorUndefined;
+            }
+
+            intraRefreshParams->nRefreshPeriod =
+                    (mAIRMode == IVE_AIR_MODE_NONE) ? 0 : mAIRRefreshPeriod;
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SoftVideoEncoderOMXComponent::getConfig(index, _params);
+    }
+}
+
 OMX_ERRORTYPE SoftAVC::setConfig(
         OMX_INDEXTYPE index, const OMX_PTR _params) {
-    switch (index) {
+    switch ((int)index) {
         case OMX_IndexConfigVideoIntraVOPRefresh:
         {
             OMX_CONFIG_INTRAREFRESHVOPTYPE *params =
@@ -1061,7 +1090,9 @@
                 return OMX_ErrorBadPortIndex;
             }
 
-            mKeyFrameRequested = params->IntraRefreshVOP;
+            if (params->IntraRefreshVOP) {
+                mUpdateFlag |= kRequestKeyFrame;
+            }
             return OMX_ErrorNone;
         }
 
@@ -1080,11 +1111,35 @@
 
             if (mBitrate != params->nEncodeBitrate) {
                 mBitrate = params->nEncodeBitrate;
-                mBitrateUpdated = true;
+                mUpdateFlag |= kUpdateBitrate;
             }
             return OMX_ErrorNone;
         }
 
+        case OMX_IndexConfigAndroidIntraRefresh:
+        {
+            const OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE *intraRefreshParams =
+                (const OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE *)_params;
+
+            if (!isValidOMXParam(intraRefreshParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (intraRefreshParams->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (intraRefreshParams->nRefreshPeriod == 0) {
+                mAIRMode = IVE_AIR_MODE_NONE;
+                mAIRRefreshPeriod = 0;
+            } else if (intraRefreshParams->nRefreshPeriod > 0) {
+                mAIRMode = IVE_AIR_MODE_CYCLIC;
+                mAIRRefreshPeriod = intraRefreshParams->nRefreshPeriod;
+            }
+            mUpdateFlag |= kUpdateAIRMode;
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::setConfig(index, _params);
     }
@@ -1097,7 +1152,7 @@
     }
 
     mBitrate = bitrate->nTargetBitrate;
-    mBitrateUpdated = true;
+    mUpdateFlag |= kUpdateBitrate;
 
     return OMX_ErrorNone;
 }
@@ -1317,12 +1372,19 @@
             return;
         }
 
-        if (mBitrateUpdated) {
-            setBitRate();
-        }
-
-        if (mKeyFrameRequested) {
-            setFrameType(IV_IDR_FRAME);
+        if (mUpdateFlag) {
+            if (mUpdateFlag & kUpdateBitrate) {
+                setBitRate();
+            }
+            if (mUpdateFlag & kRequestKeyFrame) {
+                setFrameType(IV_IDR_FRAME);
+            }
+            if (mUpdateFlag & kUpdateAIRMode) {
+                setAirParams();
+                notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+                        OMX_IndexConfigAndroidIntraRefresh, NULL);
+            }
+            mUpdateFlag = 0;
         }
 
         if ((inputBufferHeader != NULL)
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
index 4418a7f..cf6f899 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
@@ -95,8 +95,7 @@
 #define DEFAULT_SOC                 SOC_GENERIC
 #define DEFAULT_INTRA4x4            0
 #define STRLENGTH                   500
-
-
+#define DEFAULT_CONSTRAINED_INTRA   0
 
 #define MIN(a, b) ((a) < (b))? (a) : (b)
 #define MAX(a, b) ((a) > (b))? (a) : (b)
@@ -142,6 +141,12 @@
         kNumBuffers = 2,
     };
 
+    enum {
+        kUpdateBitrate            = 1 << 0,
+        kRequestKeyFrame          = 1 << 1,
+        kUpdateAIRMode            = 1 << 2,
+    };
+
     // OMX input buffer's timestamp and flags
     typedef struct {
         int64_t mTimeUs;
@@ -153,11 +158,7 @@
     struct timeval mTimeStart;   // Time at the start of decode()
     struct timeval mTimeEnd;     // Time at the end of decode()
 
-
-    // If a request for a change it bitrate has been received.
-    bool mBitrateUpdated;
-
-    bool mKeyFrameRequested;
+    int mUpdateFlag;
 
 #ifdef FILE_DUMP_ENABLE
     char mInFile[200];
@@ -180,6 +181,7 @@
     bool     mReconEnable;
     bool     mPSNREnable;
     bool     mEntropyMode;
+    bool     mConstrainedIntraFlag;
     IVE_SPEED_CONFIG     mEncSpeed;
 
     uint8_t *mConversionBuffers[MAX_CONVERSION_BUFFERS];
@@ -218,6 +220,9 @@
     OMX_ERRORTYPE setConfig(
         OMX_INDEXTYPE index, const OMX_PTR _params);
 
+    OMX_ERRORTYPE getConfig(
+        OMX_INDEXTYPE index, const OMX_PTR _params);
+
     // Handles port definition changes.
     OMX_ERRORTYPE internalSetPortParams(
         const OMX_PARAM_PORTDEFINITIONTYPE *port);
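Replacing the mBitrateUpdated/mKeyFrameRequested booleans with a single
mUpdateFlag bitmask is what lets this patch bolt on a third deferred update
(kUpdateAIRMode) for one enum bit instead of another member and another
if-chain, and the encoder loop tests one integer in the common no-op case.
The pattern in isolation (the setter/apply names are illustrative):

/* Deferred-update bitmask pattern used by SoftAVC; illustrative. */
enum {
    kUpdateBitrate   = 1 << 0,
    kRequestKeyFrame = 1 << 1,
    kUpdateAIRMode   = 1 << 2,
};

extern void set_bitrate(void);     /* hypothetical: setBitRate() */
extern void force_idr(void);       /* hypothetical: setFrameType(IV_IDR_FRAME) */
extern void set_air_params(void);  /* hypothetical: setAirParams() */

static int update_flag;

static void apply_pending_updates(void)
{
    if (!update_flag)
        return;                                   /* common case: nothing */
    if (update_flag & kUpdateBitrate)   set_bitrate();
    if (update_flag & kRequestKeyFrame) force_idr();
    if (update_flag & kUpdateAIRMode)   set_air_params();
    update_flag = 0;                              /* consume all at once */
}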
diff --git a/media/libstagefright/codecs/common/Config.mk b/media/libstagefright/codecs/common/Config.mk
deleted file mode 100644
index a843cef..0000000
--- a/media/libstagefright/codecs/common/Config.mk
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# This configure file is just for Linux projects against Android
-#
-
-VOPRJ :=
-VONJ :=
-
-# WARNING:
-# Using v7 breaks generic build
-ifeq ($(TARGET_ARCH),arm)
-VOTT := v5
-else
-VOTT := pc
-endif
-
-# Do we also need to check on ARCH_ARM_HAVE_ARMV7A? - probably not
-ifeq ($(TARGET_ARCH),arm)
-  ifeq ($(ARCH_ARM_HAVE_NEON),true)
-    VOTT := v7
-  endif
-endif
-
-VOTEST := 0
-
diff --git a/media/libstagefright/codecs/flac/enc/Android.mk b/media/libstagefright/codecs/flac/enc/Android.mk
index 59a11de..7e6e015 100644
--- a/media/libstagefright/codecs/flac/enc/Android.mk
+++ b/media/libstagefright/codecs/flac/enc/Android.mk
@@ -10,6 +10,8 @@
         external/flac/include
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
         libstagefright libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/codecs/g711/dec/Android.mk b/media/libstagefright/codecs/g711/dec/Android.mk
index a0112e1..b36c99d 100644
--- a/media/libstagefright/codecs/g711/dec/Android.mk
+++ b/media/libstagefright/codecs/g711/dec/Android.mk
@@ -15,5 +15,7 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/g711/dec/SoftG711.cpp b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
index c8277de..9f7b590 100644
--- a/media/libstagefright/codecs/g711/dec/SoftG711.cpp
+++ b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
@@ -283,7 +283,8 @@
 // static
 void SoftG711::DecodeALaw(
         int16_t *out, const uint8_t *in, size_t inSize) {
-    while (inSize-- > 0) {
+    while (inSize > 0) {
+        inSize--;
         int32_t x = *in++;
 
         int32_t ix = x ^ 0x55;
@@ -309,7 +310,8 @@
 // static
 void SoftG711::DecodeMLaw(
         int16_t *out, const uint8_t *in, size_t inSize) {
-    while (inSize-- > 0) {
+    while (inSize > 0) {
+        inSize--;
         int32_t x = *in++;
 
         int32_t mantissa = ~x;
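The G.711 loop rewrite looks cosmetic but is what lets this module run clean
under the unsigned-integer-overflow sanitizer enabled above: with
while (inSize-- > 0), the final test post-decrements inSize at 0 and wraps the
size_t to SIZE_MAX after the loop exits. That wrap is well-defined C, but the
sanitizer reports it, so the patch decrements inside the body instead.
Demonstrated in isolation:

/* Why `while (n-- > 0)` trips the unsigned-integer-overflow sanitizer. */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    size_t n = 3;
    while (n-- > 0) { }         /* last test decrements 0, wrapping to SIZE_MAX */
    printf("%zu\n", n);         /* prints 18446744073709551615 on LP64 */

    size_t m = 3;
    while (m > 0) { m--; }      /* the patched form: m ends at exactly 0 */
    printf("%zu\n", m);         /* prints 0 */
    return 0;
}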
diff --git a/media/libstagefright/codecs/gsm/dec/Android.mk b/media/libstagefright/codecs/gsm/dec/Android.mk
index 30868d5..fe8c830 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.mk
+++ b/media/libstagefright/codecs/gsm/dec/Android.mk
@@ -10,6 +10,8 @@
         external/libgsm/inc
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
         libstagefright libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/codecs/hevcdec/Android.mk b/media/libstagefright/codecs/hevcdec/Android.mk
index c0c694e..78c4637 100644
--- a/media/libstagefright/codecs/hevcdec/Android.mk
+++ b/media/libstagefright/codecs/hevcdec/Android.mk
@@ -13,6 +13,8 @@
 LOCAL_C_INCLUDES += $(TOP)/external/libhevc/common
 LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES  := libstagefright
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index 1dac868..0215a11 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -377,7 +377,7 @@
     uint8_t *pBuf;
     if (outHeader) {
         if (outHeader->nAllocLen < sizeY + (sizeUV * 2)) {
-            android_errorWriteLog(0x534e4554, "27569635");
+            android_errorWriteLog(0x534e4554, "27833616");
             return false;
         }
         pBuf = outHeader->pBuffer;
@@ -404,7 +404,7 @@
         uint32_t bufferSize = displayStride * displayHeight * 3 / 2;
         mFlushOutBuffer = (uint8_t *)memalign(128, bufferSize);
         if (NULL == mFlushOutBuffer) {
-            ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize);
+            ALOGE("Could not allocate flushOutputBuffer of size %u", bufferSize);
             return;
         }
 
@@ -459,16 +459,6 @@
     List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
     List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
 
-    /* If input EOS is seen and decoder is not in flush mode,
-     * set the decoder in flush mode.
-     * There can be a case where EOS is sent along with last picture data
-     * In that case, only after decoding that input data, decoder has to be
-     * put in flush. This case is handled here  */
-
-    if (mReceivedEOS && !mIsInFlush) {
-        setFlushMode();
-    }
-
     while (!outQueue.empty()) {
         BufferInfo *inInfo;
         OMX_BUFFERHEADERTYPE *inHeader;
@@ -586,6 +576,8 @@
                 mChangingResolution = false;
                 resetDecoder();
                 resetPlugin();
+                mStride = outputBufferWidth();
+                setParams(mStride);
                 continue;
             }
 
@@ -620,7 +612,7 @@
                 outInfo = NULL;
                 notifyFillBufferDone(outHeader);
                 outHeader = NULL;
-            } else {
+            } else if (mIsInFlush) {
                 /* If in flush mode and no output is returned by the codec,
                  * then come out of flush mode */
                 mIsInFlush = false;
@@ -641,6 +633,16 @@
             }
         }
 
+        /* If input EOS is seen and decoder is not in flush mode,
+         * set the decoder in flush mode.
+         * There can be a case where EOS is sent along with last picture data
+         * In that case, only after decoding that input data, decoder has to be
+         * put in flush. This case is handled here  */
+
+        if (mReceivedEOS && !mIsInFlush) {
+            setFlushMode();
+        }
+
         // TODO: Handle more than one picture data
         if (inHeader != NULL) {
             inInfo->mOwnedByUs = false;
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.mk b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
index 1d232c6..eb39b44 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
@@ -47,6 +47,8 @@
 LOCAL_CFLAGS := -DOSCL_EXPORT_REF= -DOSCL_IMPORT_REF=
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
 
@@ -75,5 +77,7 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index 7117692..ab079e8 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -34,6 +34,8 @@
     $(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_STATIC_LIBRARY)
 
@@ -75,5 +77,29 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
+
+################################################################################
+
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := \
+        test/m4v_h263_enc_test.cpp
+
+LOCAL_C_INCLUDES := \
+        $(LOCAL_PATH)/src \
+        $(LOCAL_PATH)/include
+
+LOCAL_CFLAGS := -DOSCL_EXPORT_REF= -DOSCL_IMPORT_REF= -DBX_RC
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
+
+LOCAL_STATIC_LIBRARIES := \
+        libstagefright_m4vh263enc
+
+LOCAL_MODULE := libstagefright_m4vh263enc_test
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/fastcodemb.cpp b/media/libstagefright/codecs/m4v_h263/enc/src/fastcodemb.cpp
index 0ad39a6..50c8161 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/fastcodemb.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/fastcodemb.cpp
@@ -527,6 +527,9 @@
 /*  Modified :                                                              */
 /*      8/15/01,  - do 4 pixel at a time    assuming 32 bit register        */
 /* ======================================================================== */
+#ifdef __clang__
+__attribute((no_sanitize("integer")))
+#endif
 Int Sad8x8(UChar *cur, UChar *prev, Int width)
 {
     UChar *end = cur + (width << 3);
@@ -590,7 +593,9 @@
 /*  Modified :                                                              */
 /*          8/15/01,  - SIMD 4 pixels at a time                         */
 /* ======================================================================== */
-
+#ifdef __clang__
+__attribute((no_sanitize("integer")))
+#endif
 Int getBlockSum(UChar *cur, Int width)
 {
     Int sad = 0, sum4 = 0, sum2 = 0;
diff --git a/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp b/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp
new file mode 100644
index 0000000..db2c61a
--- /dev/null
+++ b/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h> // memset(), strcmp()
+
+#include "mp4enc_api.h"
+
+// Constants.
+enum {
+    kMaxWidth         = 720,
+    kMaxHeight        = 480,
+    kMaxFrameRate     = 30,
+    kMaxBitrate       = 2048, // in kbps.
+    kOutputBufferSize = 250 * 1024,
+    kIDRFrameRefreshIntervalInSec = 1, // in seconds.
+};
+
+int main(int argc, char *argv[]) {
+
+    if (argc < 8) {
+        fprintf(stderr, "Usage %s <input yuv> <output file> <mode> <width> "
+                        "<height> <frame rate> <bitrate in kbps>\n", argv[0]);
+        fprintf(stderr, "mode : h263 or mpeg4\n");
+        fprintf(stderr, "Max width %d\n", kMaxWidth);
+        fprintf(stderr, "Max height %d\n", kMaxHeight);
+        fprintf(stderr, "Max framerate %d\n", kMaxFrameRate);
+        fprintf(stderr, "Max bitrate %d kbps\n", kMaxBitrate);
+        return EXIT_FAILURE;
+    }
+
+    // Read mode.
+    bool isH263mode;
+    if (strcmp(argv[3], "mpeg4") == 0) {
+        isH263mode = false;
+    } else if (strcmp(argv[3], "h263") == 0) {
+        isH263mode = true;
+    } else {
+        fprintf(stderr, "Unsupported mode %s\n", argv[3]);
+        return EXIT_FAILURE;
+    }
+
+    // Read height and width.
+    int32_t width;
+    int32_t height;
+    width = atoi(argv[4]);
+    height = atoi(argv[5]);
+    if (width > kMaxWidth || height > kMaxHeight || width <= 0 || height <= 0) {
+        fprintf(stderr, "Unsupported dimensions %dx%d\n", width, height);
+        return EXIT_FAILURE;
+    }
+
+    if (width % 16 != 0 || height % 16 != 0) {
+        fprintf(stderr, "Video frame size %dx%d must be a multiple of 16\n",
+            width, height);
+        return EXIT_FAILURE;
+    }
+
+    // Read frame rate.
+    int32_t frameRate;
+    frameRate = atoi(argv[6]);
+    if (frameRate > kMaxFrameRate || frameRate <= 0) {
+        fprintf(stderr, "Unsupported frame rate %d\n", frameRate);
+        return EXIT_FAILURE;
+    }
+
+    // Read bitrate.
+    int32_t bitrate;
+    bitrate = atoi(argv[7]);
+    if (bitrate > kMaxBitrate || bitrate <= 0) {
+        fprintf(stderr, "Unsupported bitrate %d\n", bitrate);
+        return EXIT_FAILURE;
+    }
+
+    // Allocate input buffer.
+    uint8_t *inputBuf = (uint8_t *)malloc((width * height * 3) / 2);
+    assert(inputBuf != NULL);
+
+    // Allocate output buffer.
+    uint8_t *outputBuf = (uint8_t *)malloc(kOutputBufferSize);
+    assert(outputBuf != NULL);
+
+    // Open the input file.
+    FILE *fpInput = fopen(argv[1], "rb");
+    if (fpInput == NULL) {
+        fprintf(stderr, "Could not open %s\n", argv[1]);
+        free(inputBuf);
+        free(outputBuf);
+        return EXIT_FAILURE;
+    }
+
+    // Open the output file.
+    FILE *fpOutput = fopen(argv[2], "wb");
+    if (fpOutput == NULL) {
+        fprintf(stderr, "Could not open %s\n", argv[2]);
+        free(inputBuf);
+        free(outputBuf);
+        fclose(fpInput);
+        return EXIT_FAILURE;
+    }
+
+    // Initialize the encoder parameters.
+    tagvideoEncOptions encParams;
+    memset(&encParams, 0, sizeof(tagvideoEncOptions));
+    if (!PVGetDefaultEncOption(&encParams, 0)) {
+        fprintf(stderr, "Failed to get default encoding parameters\n");
+        free(inputBuf);
+        free(outputBuf);
+        fclose(fpInput);
+        fclose(fpOutput);
+        return EXIT_FAILURE;
+    }
+
+    if (isH263mode == false) {
+        encParams.encMode = COMBINE_MODE_WITH_ERR_RES;
+    } else {
+        encParams.encMode = H263_MODE;
+    }
+    encParams.encWidth[0] = width;
+    encParams.encHeight[0] = height;
+    encParams.encFrameRate[0] = frameRate;
+    encParams.rcType = VBR_1;
+    encParams.vbvDelay = 5.0f;
+    encParams.profile_level = CORE_PROFILE_LEVEL2;
+    encParams.packetSize = 32;
+    encParams.rvlcEnable = PV_OFF;
+    encParams.numLayers = 1;
+    encParams.timeIncRes = 1000;
+    encParams.tickPerSrc = encParams.timeIncRes / frameRate;
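+    // With timeIncRes = 1000 one tick is 1 ms; tickPerSrc is the timestamp
+    // increment per source frame.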
+
+    encParams.bitRate[0] = bitrate * 1024;
+    encParams.iQuant[0] = 15;
+    encParams.pQuant[0] = 12;
+    encParams.quantType[0] = 0;
+    encParams.noFrameSkipped = PV_OFF;
+
+    int32_t  IDRFrameRefreshIntervalInSec = kIDRFrameRefreshIntervalInSec;
+    if (IDRFrameRefreshIntervalInSec == 0) {
+        encParams.intraPeriod = 1;  // All I frames.
+    } else {
+        encParams.intraPeriod = (IDRFrameRefreshIntervalInSec * frameRate);
+    }
+
+    encParams.numIntraMB = 0;
+    encParams.sceneDetect = PV_ON;
+    encParams.searchRange = 16;
+    encParams.mv8x8Enable = PV_OFF;
+    encParams.gobHeaderInterval = 0;
+    encParams.useACPred = PV_ON;
+    encParams.intraDCVlcTh = 0;
+
+    // Initialize the handle.
+    tagvideoEncControls handle;
+    memset(&handle, 0, sizeof(tagvideoEncControls));
+
+    // Initialize the encoder.
+    if (!PVInitVideoEncoder(&handle, &encParams)) {
+        fprintf(stderr, "Failed to initialize the encoder\n");
+        free(inputBuf);
+        free(outputBuf);
+        fclose(fpInput);
+        fclose(fpOutput);
+        return EXIT_FAILURE;
+    }
+
+    // Generate the header.
+    int32_t headerLength = kOutputBufferSize;
+    if (!PVGetVolHeader(&handle, outputBuf, &headerLength, 0)) {
+        fprintf(stderr, "Failed to get VOL header\n");
+        free(inputBuf);
+        free(outputBuf);
+        fclose(fpInput);
+        fclose(fpOutput);
+        return EXIT_FAILURE;
+    }
+    fwrite(outputBuf, 1, headerLength, fpOutput);
+
+    // Core loop.
+    int32_t retVal = EXIT_SUCCESS;
+    int32_t frameSize = (width * height * 3) / 2;
+    int32_t numFramesEncoded = 0;
+
+    while (1) {
+        // Read the input frame.
+        int32_t bytesRead;
+        bytesRead = fread(inputBuf, 1, frameSize, fpInput);
+        if (bytesRead != frameSize) {
+            break; // End of file.
+        }
+
+        // Encode the input frame.
+        VideoEncFrameIO vin, vout;
+        memset(&vin, 0, sizeof(vin));
+        memset(&vout, 0, sizeof(vout));
+        vin.height = height; // height is multiple of 16.
+        vin.pitch = width; // width is multiple of 16.
+        vin.timestamp = (numFramesEncoded * 1000) / frameRate;  // in ms.
+        vin.yChan = inputBuf;
+        vin.uChan = vin.yChan + vin.height * vin.pitch;
+        vin.vChan = vin.uChan + ((vin.height * vin.pitch) >> 2);
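+        // YUV420 planar layout: the Y plane is height*pitch bytes; the U and
+        // V planes each occupy a quarter of that, packed immediately after.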
+
+        uint32_t modTimeMs = 0;
+        int32_t nLayer = 0;
+        MP4HintTrack hintTrack;
+        int32_t dataLength = kOutputBufferSize;
+        if (!PVEncodeVideoFrame(&handle, &vin, &vout,
+                &modTimeMs, outputBuf, &dataLength, &nLayer) ||
+            !PVGetHintTrack(&handle, &hintTrack)) {
+            fprintf(stderr, "Failed to encode frame or get hink track at "
+                    " frame %d\n", numFramesEncoded);
+            retVal = EXIT_FAILURE;
+            break;
+        }
+        PVGetOverrunBuffer(&handle);
+        numFramesEncoded++;
+
+        // Write the output.
+        fwrite(outputBuf, 1, dataLength, fpOutput);
+    }
+
+    // Close input and output file.
+    fclose(fpInput);
+    fclose(fpOutput);
+
+    // Free allocated memory.
+    free(inputBuf);
+    free(outputBuf);
+
+    // Close encoder instance.
+    PVCleanUpVideoEncoder(&handle);
+    return retVal;
+}
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index 948ae29..11581c1 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -28,19 +28,22 @@
  	src/pvmp3_stereo_proc.cpp \
  	src/pvmp3_reorder.cpp \
 
-ifeq ($(TARGET_ARCH),arm)
-LOCAL_SRC_FILES += \
+LOCAL_SRC_FILES_arm += \
 	src/asm/pvmp3_polyphase_filter_window_gcc.s \
  	src/asm/pvmp3_mdct_18_gcc.s \
  	src/asm/pvmp3_dct_9_gcc.s \
 	src/asm/pvmp3_dct_16_gcc.s
-else
-LOCAL_SRC_FILES += \
+LOCAL_SRC_FILES_other_archs := \
  	src/pvmp3_polyphase_filter_window.cpp \
  	src/pvmp3_mdct_18.cpp \
  	src/pvmp3_dct_9.cpp \
  	src/pvmp3_dct_16.cpp
-endif
+
+LOCAL_SRC_FILES_arm64  := $(LOCAL_SRC_FILES_other_archs)
+LOCAL_SRC_FILES_mips   := $(LOCAL_SRC_FILES_other_archs)
+LOCAL_SRC_FILES_mips64 := $(LOCAL_SRC_FILES_other_archs)
+LOCAL_SRC_FILES_x86    := $(LOCAL_SRC_FILES_other_archs)
+LOCAL_SRC_FILES_x86_64 := $(LOCAL_SRC_FILES_other_archs)
 
 LOCAL_C_INCLUDES := \
         frameworks/av/media/libstagefright/include \
@@ -51,6 +54,8 @@
         -D"OSCL_UNUSED_ARG(x)=(void)(x)"
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_mp3dec
 
@@ -72,6 +77,8 @@
         $(LOCAL_PATH)/include
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
         libstagefright libstagefright_omx libstagefright_foundation libutils liblog
@@ -83,3 +90,27 @@
 LOCAL_MODULE_TAGS := optional
 
 include $(BUILD_SHARED_LIBRARY)
+
+################################################################################
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := \
+        test/mp3dec_test.cpp  \
+        test/mp3reader.cpp
+
+LOCAL_C_INCLUDES := \
+        $(LOCAL_PATH)/src \
+        $(LOCAL_PATH)/include \
+        $(LOCAL_PATH)/test/include \
+        $(call include-path-for, audio-utils)
+
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
+LOCAL_STATIC_LIBRARIES := \
+        libstagefright_mp3dec libsndfile
+
+LOCAL_SHARED_LIBRARIES := libaudioutils
+
+LOCAL_MODULE := libstagefright_mp3dec_test
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 9f7dd59..0822c34 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -275,7 +275,7 @@
 
         mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t);
         if ((int32)outHeader->nAllocLen < mConfig->outputFrameSize) {
-            ALOGE("input buffer too small: got %lu, expected %u",
+            ALOGE("input buffer too small: got %u, expected %u",
                 outHeader->nAllocLen, mConfig->outputFrameSize);
             android_errorWriteLog(0x534e4554, "27793371");
             notify(OMX_EventError, OMX_ErrorUndefined, OUTPUT_BUFFER_TOO_SMALL, NULL);
diff --git a/media/libstagefright/codecs/mp3dec/test/mp3dec_test.cpp b/media/libstagefright/codecs/mp3dec/test/mp3dec_test.cpp
new file mode 100644
index 0000000..26d62f3
--- /dev/null
+++ b/media/libstagefright/codecs/mp3dec/test/mp3dec_test.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "pvmp3decoder_api.h"
+#include "mp3reader.h"
+#include <audio_utils/sndfile.h>
+
+using namespace std;
+
+enum {
+    kInputBufferSize = 10 * 1024,
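+    // One decoded MPEG-1 Layer III stereo frame is 1152 samples x 2 channels
+    // x 2 bytes = 4608 bytes; the buffer below is sized at twice that.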
+    kOutputBufferSize = 4608 * 2,
+};
+
+int main(int argc, const char **argv) {
+
+    if (argc != 3) {
+        fprintf(stderr, "Usage %s <input file> <output file>\n", argv[0]);
+        return EXIT_FAILURE;
+    }
+
+    // Initialize the config.
+    tPVMP3DecoderExternal config;
+    config.equalizerType = flat;
+    config.crcEnabled = false;
+
+    // Allocate the decoder memory.
+    uint32_t memRequirements = pvmp3_decoderMemRequirements();
+    void *decoderBuf = malloc(memRequirements);
+    assert(decoderBuf != NULL);
+
+    // Initialize the decoder.
+    pvmp3_InitDecoder(&config, decoderBuf);
+
+    // Open the input file.
+    Mp3Reader mp3Reader;
+    bool success = mp3Reader.init(argv[1]);
+    if (!success) {
+        fprintf(stderr, "Encountered error reading %s\n", argv[1]);
+        free(decoderBuf);
+        return EXIT_FAILURE;
+    }
+
+    // Open the output file.
+    SF_INFO sfInfo;
+    memset(&sfInfo, 0, sizeof(SF_INFO));
+    sfInfo.channels = mp3Reader.getNumChannels();
+    sfInfo.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
+    sfInfo.samplerate = mp3Reader.getSampleRate();
+    SNDFILE *handle = sf_open(argv[2], SFM_WRITE, &sfInfo);
+    if (handle == NULL) {
+        fprintf(stderr, "Encountered error writing %s\n", argv[2]);
+        mp3Reader.close();
+        free(decoderBuf);
+        return EXIT_FAILURE;
+    }
+
+    // Allocate input buffer.
+    uint8_t *inputBuf = static_cast<uint8_t*>(malloc(kInputBufferSize));
+    assert(inputBuf != NULL);
+
+    // Allocate output buffer.
+    int16_t *outputBuf = static_cast<int16_t*>(malloc(kOutputBufferSize));
+    assert(outputBuf != NULL);
+
+    // Decode loop.
+    int retVal = EXIT_SUCCESS;
+    while (1) {
+        // Read input from the file.
+        uint32_t bytesRead;
+        bool success = mp3Reader.getFrame(inputBuf, &bytesRead);
+        if (!success) break;
+
+        // Set the input config.
+        config.inputBufferCurrentLength = bytesRead;
+        config.inputBufferMaxLength = 0;
+        config.inputBufferUsedLength = 0;
+        config.pInputBuffer = inputBuf;
+        config.pOutputBuffer = outputBuf;
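+        // outputFrameSize counts 16-bit samples, not bytes.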
+        config.outputFrameSize = kOutputBufferSize / sizeof(int16_t);
+
+        ERROR_CODE decoderErr;
+        decoderErr = pvmp3_framedecoder(&config, decoderBuf);
+        if (decoderErr != NO_DECODING_ERROR) {
+            fprintf(stderr, "Decoder encountered error\n");
+            retVal = EXIT_FAILURE;
+            break;
+        }
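+        // sf_writef_short() takes a frame count (one sample per channel),
+        // hence the division by the channel count.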
+        sf_writef_short(handle, outputBuf,
+                        config.outputFrameSize / sfInfo.channels);
+    }
+
+    // Close input reader and output writer.
+    mp3Reader.close();
+    sf_close(handle);
+
+    // Free allocated memory.
+    free(inputBuf);
+    free(outputBuf);
+    free(decoderBuf);
+
+    return retVal;
+}
diff --git a/media/libstagefright/codecs/mp3dec/test/mp3reader.cpp b/media/libstagefright/codecs/mp3dec/test/mp3reader.cpp
new file mode 100644
index 0000000..b3138ec
--- /dev/null
+++ b/media/libstagefright/codecs/mp3dec/test/mp3reader.cpp
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <stdint.h>
+#include "mp3reader.h"
+
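+// Assemble a big-endian 32-bit word: MP3 frame headers are stored big-endian.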
+static uint32_t U32_AT(const uint8_t *ptr) {
+    return (uint32_t)ptr[0] << 24 | ptr[1] << 16 | ptr[2] << 8 | ptr[3];
+}
+
+static bool parseHeader(
+        uint32_t header, size_t *frame_size,
+        uint32_t *out_sampling_rate = NULL, uint32_t *out_channels = NULL,
+        uint32_t *out_bitrate = NULL, uint32_t *out_num_samples = NULL) {
+    *frame_size = 0;
+
+    if (out_sampling_rate) {
+        *out_sampling_rate = 0;
+    }
+
+    if (out_channels) {
+        *out_channels = 0;
+    }
+
+    if (out_bitrate) {
+        *out_bitrate = 0;
+    }
+
+    if (out_num_samples) {
+        *out_num_samples = 1152;
+    }
+
+    if ((header & 0xffe00000) != 0xffe00000) {
+        return false;
+    }
+
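+    // Version field: 3 = MPEG-1, 2 = MPEG-2, 0 = MPEG-2.5, 1 = reserved.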
+    unsigned version = (header >> 19) & 3;
+
+    if (version == 0x01) {
+        return false;
+    }
+
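+    // Layer field: 3 = Layer I, 2 = Layer II, 1 = Layer III, 0 = reserved.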
+    unsigned layer = (header >> 17) & 3;
+
+    if (layer == 0x00) {
+        return false;
+    }
+
+    unsigned bitrate_index = (header >> 12) & 0x0f;
+
+    if (bitrate_index == 0 || bitrate_index == 0x0f) {
+        // Disallow "free" bitrate.
+        return false;
+    }
+
+    unsigned sampling_rate_index = (header >> 10) & 3;
+
+    if (sampling_rate_index == 3) {
+        return false;
+    }
+
+    static const int kSamplingRateV1[] = { 44100, 48000, 32000 };
+    int sampling_rate = kSamplingRateV1[sampling_rate_index];
+    if (version == 2 /* V2 */) {
+        sampling_rate /= 2;
+    } else if (version == 0 /* V2.5 */) {
+        sampling_rate /= 4;
+    }
+
+    unsigned padding = (header >> 9) & 1;
+
+    if (layer == 3) {
+        // layer I
+
+        static const int kBitrateV1[] = {
+            32, 64, 96, 128, 160, 192, 224, 256,
+            288, 320, 352, 384, 416, 448
+        };
+
+        static const int kBitrateV2[] = {
+            32, 48, 56, 64, 80, 96, 112, 128,
+            144, 160, 176, 192, 224, 256
+        };
+
+        int bitrate =
+            (version == 3 /* V1 */)
+                ? kBitrateV1[bitrate_index - 1]
+                : kBitrateV2[bitrate_index - 1];
+
+        if (out_bitrate) {
+            *out_bitrate = bitrate;
+        }
+
+        *frame_size = (12000 * bitrate / sampling_rate + padding) * 4;
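+        // Layer I frames are counted in 4-byte slots, e.g. 384 kbps at
+        // 48000 Hz: (12000 * 384 / 48000) * 4 = 384 bytes (+4 when padded).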
+
+        if (out_num_samples) {
+            *out_num_samples = 384;
+        }
+    } else {
+        // layer II or III
+
+        static const int kBitrateV1L2[] = {
+            32, 48, 56, 64, 80, 96, 112, 128,
+            160, 192, 224, 256, 320, 384
+        };
+
+        static const int kBitrateV1L3[] = {
+            32, 40, 48, 56, 64, 80, 96, 112,
+            128, 160, 192, 224, 256, 320
+        };
+
+        static const int kBitrateV2[] = {
+            8, 16, 24, 32, 40, 48, 56, 64,
+            80, 96, 112, 128, 144, 160
+        };
+
+        int bitrate;
+        if (version == 3 /* V1 */) {
+            bitrate = (layer == 2 /* L2 */)
+                ? kBitrateV1L2[bitrate_index - 1]
+                : kBitrateV1L3[bitrate_index - 1];
+
+            if (out_num_samples) {
+                *out_num_samples = 1152;
+            }
+        } else {
+            // V2 (or 2.5)
+
+            bitrate = kBitrateV2[bitrate_index - 1];
+            if (out_num_samples) {
+                *out_num_samples = (layer == 1 /* L3 */) ? 576 : 1152;
+            }
+        }
+
+        if (out_bitrate) {
+            *out_bitrate = bitrate;
+        }
+
+        if (version == 3 /* V1 */) {
+            *frame_size = 144000 * bitrate / sampling_rate + padding;
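+            // e.g. MPEG-1 Layer III at 128 kbps, 44100 Hz:
+            // 144000 * 128 / 44100 = 417 bytes (+1 when padded).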
+        } else {
+            // V2 or V2.5
+            size_t tmp = (layer == 1 /* L3 */) ? 72000 : 144000;
+            *frame_size = tmp * bitrate / sampling_rate + padding;
+        }
+    }
+
+    if (out_sampling_rate) {
+        *out_sampling_rate = sampling_rate;
+    }
+
+    if (out_channels) {
+        int channel_mode = (header >> 6) & 3;
+
+        *out_channels = (channel_mode == 3) ? 1 : 2;
+    }
+
+    return true;
+}
+
+// Mask to extract the version, layer, sampling rate parts of the MP3 header,
+// which should be the same for all frames of a stream.
+static const uint32_t kMask = 0xfffe0c00;
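+// (0xfffe0000 covers the sync word, version and layer bits; 0x0c00 covers the
+// sampling-rate index. Bitrate, padding and channel mode may vary per frame.)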
+
+static ssize_t sourceReadAt(FILE *fp, off64_t offset, void *data, size_t size) {
+    int retVal = fseek(fp, offset, SEEK_SET);
+    if (retVal != 0) {  // fseek() returns 0 on success, nonzero on failure
+        return 0;
+    } else {
+        return fread(data, 1, size, fp);
+    }
+}
+
+// Resync to next valid MP3 frame in the file.
+static bool resync(
+        FILE *fp, uint32_t match_header,
+        off64_t *inout_pos, uint32_t *out_header) {
+
+    if (*inout_pos == 0) {
+        // Skip an optional ID3 header if syncing at the very beginning
+        // of the datasource.
+
+        for (;;) {
+            uint8_t id3header[10];
+            int retVal = sourceReadAt(fp, *inout_pos, id3header,
+                                      sizeof(id3header));
+            if (retVal < (ssize_t)sizeof(id3header)) {
+                // If we can't even read these 10 bytes, we might as well bail
+                // out, even if there _were_ 10 bytes of valid mp3 audio data...
+                return false;
+            }
+
+            if (memcmp("ID3", id3header, 3)) {
+                break;
+            }
+
+            // Skip the ID3v2 header.
+
+            size_t len =
+                ((id3header[6] & 0x7f) << 21)
+                | ((id3header[7] & 0x7f) << 14)
+                | ((id3header[8] & 0x7f) << 7)
+                | (id3header[9] & 0x7f);
+
+            len += 10;
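+            // ID3v2 sizes are stored "syncsafe" (7 bits per byte) and exclude
+            // the 10-byte header itself, hence the adjustment above.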
+
+            *inout_pos += len;
+        }
+
+    }
+
+    off64_t pos = *inout_pos;
+    bool valid = false;
+
+    const int32_t kMaxReadBytes = 1024;
+    const int32_t kMaxBytesChecked = 128 * 1024;
+    uint8_t buf[kMaxReadBytes];
+    ssize_t bytesToRead = kMaxReadBytes;
+    ssize_t totalBytesRead = 0;
+    ssize_t remainingBytes = 0;
+    bool reachEOS = false;
+    uint8_t *tmp = buf;
+
+    do {
+        if (pos >= *inout_pos + kMaxBytesChecked) {
+            // Don't scan forever.
+            break;
+        }
+
+        if (remainingBytes < 4) {
+            if (reachEOS) {
+                break;
+            } else {
+                memcpy(buf, tmp, remainingBytes);
+                bytesToRead = kMaxReadBytes - remainingBytes;
+
+                /*
+                 * The next read position should start from the end of
+                 * the last buffer, and thus should include the remaining
+                 * bytes in the buffer.
+                 */
+                totalBytesRead = sourceReadAt(fp, pos + remainingBytes,
+                                             buf + remainingBytes, bytesToRead);
+
+                if (totalBytesRead <= 0) {
+                    break;
+                }
+                reachEOS = (totalBytesRead != bytesToRead);
+                remainingBytes += totalBytesRead;
+                tmp = buf;
+                continue;
+            }
+        }
+
+        uint32_t header = U32_AT(tmp);
+
+        if (match_header != 0 && (header & kMask) != (match_header & kMask)) {
+            ++pos;
+            ++tmp;
+            --remainingBytes;
+            continue;
+        }
+
+        size_t frame_size;
+        uint32_t sample_rate, num_channels, bitrate;
+        if (!parseHeader(
+                    header, &frame_size,
+                    &sample_rate, &num_channels, &bitrate)) {
+            ++pos;
+            ++tmp;
+            --remainingBytes;
+            continue;
+        }
+
+        // We found what looks like a valid frame,
+        // now find its successors.
+
+        off64_t test_pos = pos + frame_size;
+
+        valid = true;
+        const int FRAME_MATCH_REQUIRED = 3;
+        for (int j = 0; j < FRAME_MATCH_REQUIRED; ++j) {
+            uint8_t hdr[4];
+            ssize_t retval = sourceReadAt(fp, test_pos, hdr, sizeof(hdr));
+            if (retval < (ssize_t)sizeof(hdr)) {
+                valid = false;
+                break;
+            }
+
+            uint32_t test_header = U32_AT(hdr);
+
+            if ((test_header & kMask) != (header & kMask)) {
+                valid = false;
+                break;
+            }
+
+            size_t test_frame_size;
+            if (!parseHeader(test_header, &test_frame_size)) {
+                valid = false;
+                break;
+            }
+
+            test_pos += test_frame_size;
+        }
+
+        if (valid) {
+            *inout_pos = pos;
+
+            if (out_header != NULL) {
+                *out_header = header;
+            }
+        }
+
+        ++pos;
+        ++tmp;
+        --remainingBytes;
+    } while (!valid);
+
+    return valid;
+}
+
+Mp3Reader::Mp3Reader() : mFp(NULL) {
+}
+
+// Initialize the MP3 reader.
+bool Mp3Reader::init(const char *file) {
+
+    // Open the file.
+    mFp = fopen(file, "rb");
+    if (mFp == NULL) return false;
+
+    // Sync to the first valid frame.
+    off64_t pos = 0;
+    uint32_t header;
+    bool success = resync(mFp, 0 /*match_header*/, &pos, &header);
+    if (success == false) return false;
+
+    mCurrentPos  = pos;
+    mFixedHeader = header;
+
+    size_t frame_size;
+    return parseHeader(header, &frame_size, &mSampleRate,
+                       &mNumChannels, &mBitrate);
+}
+
+// Get the next valid MP3 frame.
+bool Mp3Reader::getFrame(void *buffer, uint32_t *size) {
+
+    size_t frame_size;
+    uint32_t bitrate;
+    uint32_t num_samples;
+    uint32_t sample_rate;
+    for (;;) {
+        ssize_t n = sourceReadAt(mFp, mCurrentPos, buffer, 4);
+        if (n < 4) {
+            return false;
+        }
+
+        uint32_t header = U32_AT((const uint8_t *)buffer);
+
+        if ((header & kMask) == (mFixedHeader & kMask)
+            && parseHeader(
+                header, &frame_size, &sample_rate, NULL /*out_channels*/,
+                &bitrate, &num_samples)) {
+            break;
+        }
+
+        // Lost sync.
+        off64_t pos = mCurrentPos;
+        if (!resync(mFp, mFixedHeader, &pos, NULL /*out_header*/)) {
+            // Unable to resync; signal end of stream.
+            return false;
+        }
+
+        mCurrentPos = pos;
+
+        // Try again with the new position.
+    }
+    ssize_t n = sourceReadAt(mFp, mCurrentPos, buffer, frame_size);
+    if (n < (ssize_t)frame_size) {
+        return false;
+    }
+
+    *size = frame_size;
+    mCurrentPos += frame_size;
+    return true;
+}
+
+// Close the MP3 reader.
+void Mp3Reader::close() {
+    assert(mFp != NULL);
+    fclose(mFp);
+    mFp = NULL;  // guard against double-close
+}
+
+Mp3Reader::~Mp3Reader() {
+}
diff --git a/media/libstagefright/codecs/mp3dec/test/mp3reader.h b/media/libstagefright/codecs/mp3dec/test/mp3reader.h
new file mode 100644
index 0000000..871f664
--- /dev/null
+++ b/media/libstagefright/codecs/mp3dec/test/mp3reader.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef MP3READER_H_
+#define MP3READER_H_
+
+#include <stdio.h>      // FILE
+#include <stdint.h>     // uint32_t
+#include <sys/types.h>  // off64_t (Android/bionic)
+
+class Mp3Reader {
+public:
+    Mp3Reader();
+    bool init(const char *file);
+    bool getFrame(void *buffer, uint32_t *size);
+    uint32_t getSampleRate() { return mSampleRate; }
+    uint32_t getNumChannels() { return mNumChannels; }
+    void close();
+    ~Mp3Reader();
+private:
+    FILE    *mFp;
+    uint32_t mFixedHeader;
+    off64_t  mCurrentPos;
+    uint32_t mSampleRate;
+    uint32_t mNumChannels;
+    uint32_t mBitrate;
+};
+
+
+#endif /* MP3READER_H_ */
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.mk b/media/libstagefright/codecs/mpeg2dec/Android.mk
index 23b126d..f1c1719 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.mk
+++ b/media/libstagefright/codecs/mpeg2dec/Android.mk
@@ -21,6 +21,8 @@
 LOCAL_SHARED_LIBRARIES  += liblog
 
 LOCAL_LDFLAGS := -Wl,-Bsymbolic
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
index e134d38..5210683 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
@@ -74,7 +74,8 @@
       mIvColorFormat(IV_YUV_420P),
       mNewWidth(mWidth),
       mNewHeight(mHeight),
-      mChangingResolution(false) {
+      mChangingResolution(false),
+      mStride(mWidth) {
     initPorts(kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, CODEC_MIME_TYPE);
 
     // If input dump is enabled, then open create an empty file
@@ -202,6 +203,8 @@
     /* Set number of cores/threads to be used by the codec */
     setNumCores();
 
+    mStride = 0;
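+    // Zero never matches outputBufferWidth(), so the stride check in
+    // onQueueFilled() calls setParams() before the first frame is decoded.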
+
     return OK;
 }
 
@@ -384,7 +387,8 @@
     resetPlugin();
 
     /* Set the run time (dynamic) parameters */
-    setParams(displayStride);
+    mStride = outputBufferWidth();
+    setParams(mStride);
 
     /* Set number of cores/threads to be used by the codec */
     setNumCores();
@@ -501,7 +505,7 @@
     uint8_t *pBuf;
     if (outHeader) {
         if (outHeader->nAllocLen < sizeY + (sizeUV * 2)) {
-            android_errorWriteLog(0x534e4554, "27569635");
+            android_errorWriteLog(0x534e4554, "27833616");
             return false;
         }
         pBuf = outHeader->pBuffer;
@@ -548,14 +552,10 @@
     List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
     List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
 
-    /* If input EOS is seen and decoder is not in flush mode,
-     * set the decoder in flush mode.
-     * There can be a case where EOS is sent along with last picture data
-     * In that case, only after decoding that input data, decoder has to be
-     * put in flush. This case is handled here  */
-
-    if (mReceivedEOS && !mIsInFlush) {
-        setFlushMode();
+    if (outputBufferWidth() != mStride) {
+        /* Set the run-time (dynamic) parameters */
+        mStride = outputBufferWidth();
+        setParams(mStride);
     }
 
     while (!outQueue.empty()) {
@@ -686,6 +686,8 @@
                 mChangingResolution = false;
                 resetDecoder();
                 resetPlugin();
+                mStride = outputBufferWidth();
+                setParams(mStride);
                 continue;
             }
 
@@ -739,7 +741,7 @@
                     notifyFillBufferDone(outHeader);
                     outHeader = NULL;
                 }
-            } else {
+            } else if (mIsInFlush) {
                 /* If in flush mode and no output is returned by the codec,
                  * then come out of flush mode */
                 mIsInFlush = false;
@@ -760,6 +762,16 @@
             }
         }
 
+        /* If input EOS has been seen and the decoder is not in flush mode,
+         * put the decoder into flush mode. EOS can arrive together with the
+         * last picture's data; in that case the decoder is switched to flush
+         * mode only after that input has been decoded. That case is handled
+         * here. */
+
+        if (mReceivedEOS && !mIsInFlush) {
+            setFlushMode();
+        }
+
         // TODO: Handle more than one picture data
         if (inHeader != NULL) {
             inInfo->mOwnedByUs = false;
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
index f48b70b..025e9a0 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
@@ -106,6 +106,7 @@
     bool mChangingResolution;
     bool mFlushNeeded;
     bool mWaitForI;
+    size_t mStride;
 
     status_t initDecoder();
     status_t deInitDecoder();
diff --git a/media/libstagefright/codecs/on2/dec/Android.mk b/media/libstagefright/codecs/on2/dec/Android.mk
index 93ff64c..76f7600 100644
--- a/media/libstagefright/codecs/on2/dec/Android.mk
+++ b/media/libstagefright/codecs/on2/dec/Android.mk
@@ -21,5 +21,7 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 912fac2..3490008 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -17,6 +17,8 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "SoftVPX"
 #include <utils/Log.h>
+#include <utils/misc.h>
+#include "OMX_VideoExt.h"
 
 #include "SoftVPX.h"
 
@@ -26,6 +28,11 @@
 
 namespace android {
 
+// Only need to declare the highest supported profile and level here.
+static const CodecProfileLevel kVP9ProfileLevels[] = {
+    { OMX_VIDEO_VP9Profile0, OMX_VIDEO_VP9Level5  },
+};
+
 SoftVPX::SoftVPX(
         const char *name,
         const char *componentRole,
@@ -35,7 +42,8 @@
         OMX_COMPONENTTYPE **component)
     : SoftVideoDecoderOMXComponent(
             name, componentRole, codingType,
-            NULL /* profileLevels */, 0 /* numProfileLevels */,
+            codingType == OMX_VIDEO_CodingVP8 ? NULL : kVP9ProfileLevels,
+            codingType == OMX_VIDEO_CodingVP8 ?  0 : NELEM(kVP9ProfileLevels),
             320 /* width */, 240 /* height */, callbacks, appData, component),
       mMode(codingType == OMX_VIDEO_CodingVP8 ? MODE_VP8 : MODE_VP9),
       mEOSStatus(INPUT_DATA_AVAILABLE),
@@ -102,7 +110,6 @@
 }
 
 bool SoftVPX::outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset) {
-    List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
     BufferInfo *outInfo = NULL;
     OMX_BUFFERHEADERTYPE *outHeader = NULL;
@@ -215,7 +222,6 @@
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
     bool EOSseen = false;
-    vpx_codec_err_t err;
     bool portWillReset = false;
 
     while ((mEOSStatus == INPUT_EOS_SEEN || !inQueue.empty())
@@ -233,28 +239,54 @@
                     mEOSStatus == INPUT_EOS_SEEN) {
                 return;
             }
+            // Continue as outQueue may be empty now.
+            continue;
         }
 
         BufferInfo *inInfo = *inQueue.begin();
         OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+        // Software VP9 Decoder does not need the Codec Specific Data (CSD)
+        // (specified in http://www.webmproject.org/vp9/profiles/). Ignore it if
+        // it was passed.
+        if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) {
+            // Only ignore CSD buffer for VP9.
+            if (mMode == MODE_VP9) {
+                inQueue.erase(inQueue.begin());
+                inInfo->mOwnedByUs = false;
+                notifyEmptyBufferDone(inHeader);
+                continue;
+            } else {
+                // Tolerate the CSD buffer for VP8. This is a workaround
+                // for b/28689536.
+                ALOGW("WARNING: Got CSD buffer for VP8.");
+            }
+        }
+
         mTimeStamps[mTimeStampIdx] = inHeader->nTimeStamp;
 
-        BufferInfo *outInfo = *outQueue.begin();
-        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
         if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
             mEOSStatus = INPUT_EOS_SEEN;
             EOSseen = true;
         }
 
-        if (inHeader->nFilledLen > 0 &&
-            vpx_codec_decode((vpx_codec_ctx_t *)mCtx,
-                              inHeader->pBuffer + inHeader->nOffset,
-                              inHeader->nFilledLen,
-                              &mTimeStamps[mTimeStampIdx], 0)) {
-            ALOGE("on2 decoder failed to decode frame.");
-            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
-            return;
+        if (inHeader->nFilledLen > 0) {
+            vpx_codec_err_t err = vpx_codec_decode(
+                    (vpx_codec_ctx_t *)mCtx, inHeader->pBuffer + inHeader->nOffset,
+                    inHeader->nFilledLen, &mTimeStamps[mTimeStampIdx], 0);
+            if (err == VPX_CODEC_OK) {
+                inInfo->mOwnedByUs = false;
+                inQueue.erase(inQueue.begin());
+                inInfo = NULL;
+                notifyEmptyBufferDone(inHeader);
+                inHeader = NULL;
+            } else {
+                ALOGE("on2 decoder failed to decode frame. err: %d", err);
+                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+                return;
+            }
         }
+
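+        // Timestamps travel through the decoder via the user_priv pointer and
+        // come back attached to the corresponding output frame, so a ring of
+        // kNumBuffers entries covers all frames in flight.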
         mTimeStampIdx = (mTimeStampIdx + 1) % kNumBuffers;
 
         if (!outputBuffers(
@@ -266,12 +298,6 @@
         if (portWillReset) {
             return;
         }
-
-        inInfo->mOwnedByUs = false;
-        inQueue.erase(inQueue.begin());
-        inInfo = NULL;
-        notifyEmptyBufferDone(inHeader);
-        inHeader = NULL;
     }
 }
 
diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk
index 253fa04..1de318a 100644
--- a/media/libstagefright/codecs/on2/enc/Android.mk
+++ b/media/libstagefright/codecs/on2/enc/Android.mk
@@ -11,6 +11,9 @@
         frameworks/av/media/libstagefright/include \
         frameworks/native/include/media/openmax \
 
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+
 LOCAL_STATIC_LIBRARIES := \
         libvpx
 
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 5c950c7..5edfbb5 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -106,24 +106,24 @@
 
 status_t SoftVPXEncoder::initEncoder() {
     vpx_codec_err_t codec_return;
+    status_t result = UNKNOWN_ERROR;
 
-    mCodecContext = new vpx_codec_ctx_t;
-    mCodecConfiguration = new vpx_codec_enc_cfg_t;
     mCodecInterface = vpx_codec_vp8_cx();
-
     if (mCodecInterface == NULL) {
-        return UNKNOWN_ERROR;
+        goto CLEAN_UP;
     }
     ALOGD("VP8: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
           (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
           mMinQuantizer, mMaxQuantizer);
+
+    mCodecConfiguration = new vpx_codec_enc_cfg_t;
     codec_return = vpx_codec_enc_config_default(mCodecInterface,
                                                 mCodecConfiguration,
                                                 0);  // Codec specific flags
 
     if (codec_return != VPX_CODEC_OK) {
         ALOGE("Error populating default configuration for vpx encoder.");
-        return UNKNOWN_ERROR;
+        goto CLEAN_UP;
     }
 
     mCodecConfiguration->g_w = mWidth;
@@ -250,7 +250,7 @@
         default:
         {
             ALOGE("Wrong number of temporal layers %zu", mTemporalLayers);
-            return UNKNOWN_ERROR;
+            goto CLEAN_UP;
         }
     }
 
@@ -272,6 +272,7 @@
         mCodecConfiguration->rc_max_quantizer = mMaxQuantizer;
     }
 
+    mCodecContext = new vpx_codec_ctx_t;
     codec_return = vpx_codec_enc_init(mCodecContext,
                                       mCodecInterface,
                                       mCodecConfiguration,
@@ -279,7 +280,7 @@
 
     if (codec_return != VPX_CODEC_OK) {
         ALOGE("Error initializing vpx encoder");
-        return UNKNOWN_ERROR;
+        goto CLEAN_UP;
     }
 
     codec_return = vpx_codec_control(mCodecContext,
@@ -287,7 +288,7 @@
                                      mDCTPartitions);
     if (codec_return != VPX_CODEC_OK) {
         ALOGE("Error setting dct partitions for vpx encoder.");
-        return UNKNOWN_ERROR;
+        goto CLEAN_UP;
     }
 
     // Extra CBR settings
@@ -313,7 +314,7 @@
         }
         if (codec_return != VPX_CODEC_OK) {
             ALOGE("Error setting cbr parameters for vpx encoder.");
-            return UNKNOWN_ERROR;
+            goto CLEAN_UP;
         }
     }
 
@@ -321,16 +322,20 @@
         free(mConversionBuffer);
         mConversionBuffer = NULL;
         if (((uint64_t)mWidth * mHeight) > ((uint64_t)INT32_MAX / 3)) {
-            ALOGE("b/25812794, Buffer size is too big.");
-            return UNKNOWN_ERROR;
+            ALOGE("b/25812794, Buffer size is too big, width=%d, height=%d.", mWidth, mHeight);
+            goto CLEAN_UP;
         }
         mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2);
         if (mConversionBuffer == NULL) {
             ALOGE("Allocating conversion buffer failed.");
-            return UNKNOWN_ERROR;
+            goto CLEAN_UP;
         }
     }
     return OK;
+
+CLEAN_UP:
+    releaseEncoder();
+    return result;
 }
 
 
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index e63b6b1..7159674 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -84,21 +84,20 @@
 	./omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S \
 	./omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S \
 
-ifeq ($(TARGET_ARCH),arm)
-  ifeq ($(ARCH_ARM_HAVE_NEON),true)
+
+ifeq ($(ARCH_ARM_HAVE_NEON),true)
     LOCAL_ARM_NEON   := true
-#    LOCAL_CFLAGS     := -std=c99 -D._NEON -D._OMXDL
-    LOCAL_CFLAGS     := -DH264DEC_NEON -DH264DEC_OMXDL
-    LOCAL_SRC_FILES  += $(MY_ASM) $(MY_OMXDL_C_SRC) $(MY_OMXDL_ASM_SRC)
-    LOCAL_C_INCLUDES += $(LOCAL_PATH)/./source/arm_neon_asm_gcc
-    LOCAL_C_INCLUDES += $(LOCAL_PATH)/./omxdl/arm_neon/api \
+    LOCAL_CFLAGS_arm     := -DH264DEC_NEON -DH264DEC_OMXDL
+    LOCAL_SRC_FILES_arm  := $(MY_ASM) $(MY_OMXDL_C_SRC) $(MY_OMXDL_ASM_SRC)
+    LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/./source/arm_neon_asm_gcc
+    LOCAL_C_INCLUDES_arm += $(LOCAL_PATH)/./omxdl/arm_neon/api \
                         $(LOCAL_PATH)/./omxdl/arm_neon/vc/api \
                         $(LOCAL_PATH)/./omxdl/arm_neon/vc/m4p10/api
-    # h264bsdWriteMacroblock.S does not compile with Clang.
-    LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
-  endif
 endif
 
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
+
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/armCOMM.h b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/armCOMM.h
index 91e38b8..1992885 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/armCOMM.h
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/api/armCOMM.h
@@ -86,7 +86,7 @@
 
 /* Alignment operation */
 
-#define armAlignToBytes(Ptr,N)      (Ptr + ( ((N-(int)Ptr)&(N-1)) / sizeof(*Ptr) ))
+#define armAlignToBytes(Ptr,N)      (Ptr + ( ((N-(intptr_t)Ptr)&(N-1)) / sizeof(*Ptr) ))
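+/* intptr_t keeps the alignment arithmetic valid on 64-bit builds, where
+   casting a pointer to int would truncate the address. */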
 #define armAlignTo2Bytes(Ptr)       armAlignToBytes(Ptr,2)
 #define armAlignTo4Bytes(Ptr)       armAlignToBytes(Ptr,4)
 #define armAlignTo8Bytes(Ptr)       armAlignToBytes(Ptr,8)
@@ -98,8 +98,8 @@
 #define armRetDataErrIf(condition, code) if(condition) { return (code); }
 
 #ifndef ALIGNMENT_DOESNT_MATTER
-#define armIsByteAligned(Ptr,N)     ((((int)(Ptr)) % N)==0)
-#define armNotByteAligned(Ptr,N)    ((((int)(Ptr)) % N)!=0)
+#define armIsByteAligned(Ptr,N)     ((((intptr_t)(Ptr)) % N)==0)
+#define armNotByteAligned(Ptr,N)    ((((intptr_t)(Ptr)) % N)!=0)
 #else
 #define armIsByteAligned(Ptr,N)     (1)
 #define armNotByteAligned(Ptr,N)    (0)
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Average_4x_Align_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Average_4x_Align_unsafe_s.S
index 46e0018..e1ffb09 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Average_4x_Align_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Average_4x_Align_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_Average_4x4_Align0_unsafe
-    .func   armVCM4P10_Average_4x4_Align0_unsafe
 armVCM4P10_Average_4x4_Align0_unsafe:
     PUSH     {r4-r6,lr}
     LDR      r7, =0x80808080
@@ -55,10 +54,8 @@
     EOR      r4,r4,r7
     STR      r4,[r2],r3
     POP      {r4-r6,pc}
-    .endfunc
 
     .global armVCM4P10_Average_4x4_Align2_unsafe
-    .func   armVCM4P10_Average_4x4_Align2_unsafe
 armVCM4P10_Average_4x4_Align2_unsafe:
     PUSH     {r4-r6,lr}
     LDR      r7, =0x80808080
@@ -99,10 +96,8 @@
     EOR      r4,r4,r7
     STR      r4,[r2],r3
     POP      {r4-r6,pc}
-    .endfunc
 
     .global armVCM4P10_Average_4x4_Align3_unsafe
-    .func   armVCM4P10_Average_4x4_Align3_unsafe
 armVCM4P10_Average_4x4_Align3_unsafe:
     PUSH     {r4-r6,lr}
     LDR      r7, =0x80808080
@@ -143,7 +138,6 @@
     EOR      r4,r4,r7
     STR      r4,[r2],r3
     POP      {r4-r6,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingChroma_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingChroma_unsafe_s.S
index ca64a02..40ea4a9 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingChroma_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingChroma_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_DeblockingChromabSLT4_unsafe
-    .func   armVCM4P10_DeblockingChromabSLT4_unsafe
 armVCM4P10_DeblockingChromabSLT4_unsafe:
     VLD1.32  {d18[0]},[r5]!
     VSUBL.U8 q11,d5,d9
@@ -50,10 +49,8 @@
     VQMOVUN.S16 d29,q14
     VQMOVUN.S16 d24,q12
     BX       lr
-    .endfunc
 
     .global armVCM4P10_DeblockingChromabSGE4_unsafe
-    .func   armVCM4P10_DeblockingChromabSGE4_unsafe
 armVCM4P10_DeblockingChromabSGE4_unsafe:
     VHADD.U8 d13,d4,d9
     VHADD.U8 d31,d8,d5
@@ -63,7 +60,6 @@
     VRHADD.U8 d13,d13,d5
     VRHADD.U8 d31,d31,d9
     BX       lr
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingLuma_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingLuma_unsafe_s.S
index 193bc5e..05fb2c5 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingLuma_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DeblockingLuma_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_DeblockingLumabSLT4_unsafe
-    .func   armVCM4P10_DeblockingLumabSLT4_unsafe
 armVCM4P10_DeblockingLumabSLT4_unsafe:
     VSUBL.U8 q11,d5,d9
     VLD1.8   {d18[]},[r5]!
@@ -66,10 +65,8 @@
     VBIF     d24,d8,d16
     VBIF     d25,d9,d12
     BX       lr
-    .endfunc
 
     .global armVCM4P10_DeblockingLumabSGE4_unsafe
-    .func   armVCM4P10_DeblockingLumabSGE4_unsafe
 armVCM4P10_DeblockingLumabSGE4_unsafe:
     VSHR.U8  d19,d0,#2
     VADD.I8  d19,d19,d15
@@ -111,7 +108,6 @@
     VBIF     d24,d8,d16
     VBIF     d28,d10,d12
     BX       lr
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S
index 8e0db37..27c0452 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_DecodeCoeffsToPair_s.S
@@ -42,7 +42,6 @@
     .hidden   armVCM4P10_ZigZag_4x4
 
     .global armVCM4P10_DecodeCoeffsToPair
-    .func   armVCM4P10_DecodeCoeffsToPair
 armVCM4P10_DecodeCoeffsToPair:
     PUSH     {r4-r12,lr}
     SUB      sp,sp,#0x40
@@ -302,7 +301,6 @@
 L0x35c:
     ADD      sp,sp,#0x40
     POP      {r4-r12,pc}
-    .endfunc
 
 .LarmVCM4P10_CAVLCCoeffTokenTables:
     .word   armVCM4P10_CAVLCCoeffTokenTables-(P0+8)
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Align_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Align_unsafe_s.S
index 7206d76..1de9004 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Align_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Align_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_InterpolateLuma_HorAlign9x_unsafe
-    .func   armVCM4P10_InterpolateLuma_HorAlign9x_unsafe
 armVCM4P10_InterpolateLuma_HorAlign9x_unsafe:
     MOV      r12,r8
     AND      r7,r0,#3
@@ -83,10 +82,8 @@
     MOV      r0,r12
     MOV      r1,#0xc
     BX       lr
-    .endfunc
 
     .global armVCM4P10_InterpolateLuma_VerAlign4x_unsafe
-    .func   armVCM4P10_InterpolateLuma_VerAlign4x_unsafe
 armVCM4P10_InterpolateLuma_VerAlign4x_unsafe:
     AND      r7,r0,#3
     BIC      r0,r0,#3
@@ -132,7 +129,6 @@
     SUB      r0,r8,#0x1c
     MOV      r1,#4
     BX       lr
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Copy_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Copy_unsafe_s.S
index e41d662..7ba2890 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Copy_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_Copy_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_InterpolateLuma_Copy4x4_unsafe
-    .func   armVCM4P10_InterpolateLuma_Copy4x4_unsafe
 armVCM4P10_InterpolateLuma_Copy4x4_unsafe:
     PUSH     {r4-r6,lr}
     AND      r12,r0,#3
@@ -114,7 +113,6 @@
     STR      r8,[r2],r3
 Copy4x4End:
     POP      {r4-r6,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S
index c8f5cda..8b2c678 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_InterpolateLuma_HorDiagCopy_unsafe
-    .func   armVCM4P10_InterpolateLuma_HorDiagCopy_unsafe
 armVCM4P10_InterpolateLuma_HorDiagCopy_unsafe:
     PUSH     {r4-r6,lr}
     MOV      lr,#4
@@ -57,10 +56,8 @@
     SUB      r0,r7,#0x20
     MOV      r1,#8
     POP      {r4-r6,pc}
-    .endfunc
 
     .global armVCM4P10_InterpolateLuma_VerDiagCopy_unsafe
-    .func   armVCM4P10_InterpolateLuma_VerDiagCopy_unsafe
 armVCM4P10_InterpolateLuma_VerDiagCopy_unsafe:
     PUSH     {r4-r6,lr}
     LDR      r6, =0xfe00fe0
@@ -116,7 +113,6 @@
     SUB      r0,r7,#0x18
     MOV      r1,#4
     POP      {r4-r6,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S
index f5868c0..77aa927 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe
-    .func   armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe
 armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe:
     PUSH     {r4-r12,lr}
     VLD1.8   {d0,d1},[r0],r1
@@ -173,7 +172,6 @@
     VQMOVN.U16 d4,q2
     VQMOVN.U16 d6,q3
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S
index 065995d..e5f7f1c 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe
-    .func   armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe
 armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe:
     PUSH     {r4-r12,lr}
     VLD1.8   {d0,d1},[r0],r1
@@ -128,7 +127,6 @@
     VQMOVN.U16 d4,q2
     VQMOVN.U16 d6,q3
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S
index 1e2d16b..393d385 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe
-    .func   armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe
 armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe:
     PUSH     {r4-r12,lr}
     VLD1.8   {d22,d23},[r0],r1
@@ -81,7 +80,6 @@
     VQRSHRUN.S16 d26,q13,#5
     VQRSHRUN.S16 d28,q14,#5
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S
index c7def2a..698e7b5 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe
-    .func   armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe
 armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe:
     PUSH     {r4-r12,lr}
     VLD1.8   {d7},[r0],r1
@@ -67,7 +66,6 @@
     VQRSHRUN.S16 d4,q2,#5
     VQRSHRUN.S16 d6,q3,#5
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Interpolate_Chroma_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Interpolate_Chroma_s.S
index 2f4293f..e469516 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Interpolate_Chroma_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_Interpolate_Chroma_s.S
@@ -38,7 +38,6 @@
     .word   WidthIs8MVIsZero-(P0+8)
 
     .global armVCM4P10_Interpolate_Chroma
-    .func   armVCM4P10_Interpolate_Chroma
 armVCM4P10_Interpolate_Chroma:
     PUSH     {r4-r12,lr}
     VPUSH    {d8-d15}
@@ -183,7 +182,6 @@
     MOV      r0,#0
     VPOP     {d8-d15}
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_TransformResidual4x4_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_TransformResidual4x4_s.S
index d4cedb5..e18bec7 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_TransformResidual4x4_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_TransformResidual4x4_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_TransformResidual4x4
-    .func   armVCM4P10_TransformResidual4x4
 armVCM4P10_TransformResidual4x4:
     VPUSH    {d8}
     VLD4.16  {d0,d1,d2,d3},[r1]
@@ -61,7 +60,6 @@
     VST1.16  {d0,d1,d2,d3},[r0]
     VPOP     {d8}
     BX       lr
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_UnpackBlock4x4_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_UnpackBlock4x4_s.S
index 1652dc6..b97efcb 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_UnpackBlock4x4_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/armVCM4P10_UnpackBlock4x4_s.S
@@ -24,9 +24,9 @@
     .arm
     .fpu neon
     .text
+    .syntax unified
 
     .global armVCM4P10_UnpackBlock4x4
-    .func   armVCM4P10_UnpackBlock4x4
 armVCM4P10_UnpackBlock4x4:
     PUSH     {r4-r8,lr}
     LDR      r2,[r0,#0]
@@ -40,16 +40,15 @@
     STRD     r4,r5,[r1,#0x18]
 unpackLoop:
     TST      r3,#0x10
-    LDRNESB  r5,[r2,#1]
-    LDRNEB   r4,[r2],#2
+    LDRSBNE  r5,[r2,#1]
+    LDRBNE   r4,[r2],#2
     AND      r6,r7,r3,LSL #1
-    LDREQSB  r4,[r2],#1
+    LDRSBEQ  r4,[r2],#1
     ORRNE    r4,r4,r5,LSL #8
     TST      r3,#0x20
-    LDREQB   r3,[r2],#1
+    LDRBEQ   r3,[r2],#1
     STRH     r4,[r1,r6]
     BEQ      unpackLoop
     STR      r2,[r0,#0]
     POP      {r4-r8,pc}
-    .endfunc
     .end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DeblockLuma_I.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DeblockLuma_I.S
index 90b0947..6a99bde 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DeblockLuma_I.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DeblockLuma_I.S
@@ -26,7 +26,6 @@
     .text
 
     .global omxVCM4P10_DeblockLuma_I
-    .func   omxVCM4P10_DeblockLuma_I
 omxVCM4P10_DeblockLuma_I:
     PUSH     {r4-r9,lr}
     MOVS     r6,r0
@@ -76,7 +75,6 @@
     BL       omxVCM4P10_FilterDeblockingLuma_HorEdge_I
     ADD      sp,sp,#0xc
     POP      {r4-r9,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S
index 4a74594..17c5d8b 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global omxVCM4P10_DequantTransformResidualFromPairAndAdd
-    .func   omxVCM4P10_DequantTransformResidualFromPairAndAdd
 omxVCM4P10_DequantTransformResidualFromPairAndAdd:
     PUSH     {r4-r12,lr}
     VPUSH    {d8-d9}
@@ -131,7 +130,6 @@
     ADD      sp,sp,#0x20
     VPOP     {d8-d9}
     POP      {r4-r12,pc}
-    .endfunc
 
 .LarmVCM4P10_QPModuloTable:
     .word   armVCM4P10_QPModuloTable-(P0+8)
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_HorEdge_I_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_HorEdge_I_s.S
index f20fb78..4a83516 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_HorEdge_I_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_HorEdge_I_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global omxVCM4P10_FilterDeblockingChroma_HorEdge_I
-    .func   omxVCM4P10_FilterDeblockingChroma_HorEdge_I
 omxVCM4P10_FilterDeblockingChroma_HorEdge_I:
     PUSH     {r4-r10,lr}
     VPUSH    {d8-d15}
@@ -96,7 +95,6 @@
     MOV      r0,#0
     VPOP     {d8-d15}
     POP      {r4-r10,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_VerEdge_I_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_VerEdge_I_s.S
index 003526e..fe10931 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_VerEdge_I_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingChroma_VerEdge_I_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global omxVCM4P10_FilterDeblockingChroma_VerEdge_I
-    .func   omxVCM4P10_FilterDeblockingChroma_VerEdge_I
 omxVCM4P10_FilterDeblockingChroma_VerEdge_I:
     PUSH     {r4-r12,lr}
     VPUSH    {d8-d15}
@@ -132,7 +131,6 @@
     MOV      r0,#0
     VPOP     {d8-d15}
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.S
index 7ddc42e..84ffad2 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global omxVCM4P10_FilterDeblockingLuma_HorEdge_I
-    .func   omxVCM4P10_FilterDeblockingLuma_HorEdge_I
 omxVCM4P10_FilterDeblockingLuma_HorEdge_I:
     PUSH     {r4-r12,lr}
     VPUSH    {d8-d15}
@@ -116,7 +115,6 @@
     MOV      r0,#0
     VPOP     {d8-d15}
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S
index f71aceb..f2a3682 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global omxVCM4P10_FilterDeblockingLuma_VerEdge_I
-    .func   omxVCM4P10_FilterDeblockingLuma_VerEdge_I
 omxVCM4P10_FilterDeblockingLuma_VerEdge_I:
     PUSH     {r4-r12,lr}
     VPUSH    {d8-d15}
@@ -166,7 +165,6 @@
     MOV      r0,#0
     VPOP     {d8-d15}
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_InterpolateLuma_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_InterpolateLuma_s.S
index 000fbeb..314eabd 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_InterpolateLuma_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_InterpolateLuma_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global omxVCM4P10_InterpolateLuma
-    .func   omxVCM4P10_InterpolateLuma
 omxVCM4P10_InterpolateLuma:
     PUSH     {r4-r12,lr}
     VPUSH    {d8-d15}
@@ -332,7 +331,6 @@
     ADD      sp,sp,#0x10
     VPOP     {d8-d15}
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntraChroma_8x8_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntraChroma_8x8_s.S
index 4e2cff6..50d1350 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntraChroma_8x8_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntraChroma_8x8_s.S
@@ -36,7 +36,6 @@
     .hword   1, 2, 3,4
 
     .global omxVCM4P10_PredictIntraChroma_8x8
-    .func   omxVCM4P10_PredictIntraChroma_8x8
 omxVCM4P10_PredictIntraChroma_8x8:
     PUSH     {r4-r10,lr}
     VPUSH    {d8-d15}
@@ -226,7 +225,6 @@
     MOV      r0,#0
     VPOP     {d8-d15}
     POP      {r4-r10,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_16x16_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_16x16_s.S
index c71c93b..0044636 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_16x16_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_16x16_s.S
@@ -42,7 +42,6 @@
 
 
     .global omxVCM4P10_PredictIntra_16x16
-    .func   omxVCM4P10_PredictIntra_16x16
 omxVCM4P10_PredictIntra_16x16:
     PUSH     {r4-r12,lr}
     VPUSH    {d8-d15}
@@ -246,7 +245,6 @@
     MOV      r0,#0
     VPOP     {d8-d15}
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_4x4_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_4x4_s.S
index cd5d356..d4c8485 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_4x4_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_PredictIntra_4x4_s.S
@@ -35,7 +35,6 @@
     .word OMX_VC_4x4_HU-(P0+8)
 
     .global omxVCM4P10_PredictIntra_4x4
-    .func   omxVCM4P10_PredictIntra_4x4
 omxVCM4P10_PredictIntra_4x4:
     PUSH     {r4-r12,lr}
     VPUSH    {d8-d12}
@@ -270,6 +269,5 @@
     MOV      r0,#0
     VPOP     {d8-d12}
     POP      {r4-r12,pc}
-    .endfunc
 
     .end
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S
index 5570892..74f5103 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantChromaDCFromPair_s.S
@@ -24,9 +24,9 @@
     .arm
     .fpu neon
     .text
+    .syntax unified
 
     .global omxVCM4P10_TransformDequantChromaDCFromPair
-    .func   omxVCM4P10_TransformDequantChromaDCFromPair
 omxVCM4P10_TransformDequantChromaDCFromPair:
     push    {r4-r10, lr}
     ldr     r9, [r0,#0]
@@ -36,13 +36,13 @@
     ldrb    r6, [r9], #1
 unpackLoop:
     tst     r6, #0x10
-    ldrnesb r5, [r9, #1]
-    ldrneb  r4, [r9], #2
+    ldrsbne r5, [r9, #1]
+    ldrbne  r4, [r9], #2
     and     r7, r8, r6, lsl #1
-    ldreqsb r4, [r9], #1
+    ldrsbeq r4, [r9], #1
     orrne   r4, r4, r5, lsl #8
     tst     r6, #0x20
-    ldreqb  r6, [r9], #1
+    ldrbeq  r6, [r9], #1
     strh    r4, [r1, r7]
     beq     unpackLoop
     ldmia   r1, {r3, r4}
@@ -66,7 +66,6 @@
     vst1.16    {d2}, [r1]
     mov     r0, #0
     pop     {r4-r10, pc}
-    .endfunc
 
 .LarmVCM4P10_QPDivTable:
     .word armVCM4P10_QPDivTable-(P0+8)
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantLumaDCFromPair_s.S b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantLumaDCFromPair_s.S
index 5b6eee0..a01030a 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantLumaDCFromPair_s.S
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src_gcc/omxVCM4P10_TransformDequantLumaDCFromPair_s.S
@@ -26,7 +26,6 @@
     .text
 
     .global armVCM4P10_InvTransformDequantLumaDC4x4
-    .func   armVCM4P10_InvTransformDequantLumaDC4x4
 armVCM4P10_InvTransformDequantLumaDC4x4:
     PUSH     {r4-r6,lr}
     VPUSH    {d8-d13}
@@ -73,7 +72,6 @@
     VST1.16  {d0,d1,d2,d3},[r0]
     VPOP     {d8-d13}
     POP      {r4-r6,pc}
-    .endfunc
 
 .LarmVCM4P10_QPDivTable:
     .word armVCM4P10_QPDivTable-(P0+8)
@@ -81,7 +79,6 @@
     .word armVCM4P10_VMatrixQPModTable-(P1+8)
 
 .global omxVCM4P10_TransformDequantLumaDCFromPair
-.func   omxVCM4P10_TransformDequantLumaDCFromPair
 omxVCM4P10_TransformDequantLumaDCFromPair:
     PUSH     {r4-r6,lr}
     MOV      r4,r1
@@ -92,7 +89,6 @@
     BL       armVCM4P10_InvTransformDequantLumaDC4x4
     MOV      r0,#0
     POP      {r4-r6,pc}
-    .endfunc
 
     .end
 
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/armCOMM.h b/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/armCOMM.h
index fbb97e2..7304863 100644
--- a/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/armCOMM.h
+++ b/media/libstagefright/codecs/on2/h264dec/omxdl/reference/api/armCOMM.h
@@ -86,7 +86,7 @@
 
 /* Alignment operation */
 
-#define armAlignToBytes(Ptr,N)      (Ptr + ( ((N-(int)Ptr)&(N-1)) / sizeof(*Ptr) ))
+#define armAlignToBytes(Ptr,N)      (Ptr + ( ((N-(intptr_t)Ptr)&(N-1)) / sizeof(*Ptr) ))
 #define armAlignTo2Bytes(Ptr)       armAlignToBytes(Ptr,2)
 #define armAlignTo4Bytes(Ptr)       armAlignToBytes(Ptr,4)
 #define armAlignTo8Bytes(Ptr)       armAlignToBytes(Ptr,8)
@@ -98,8 +98,8 @@
 #define armRetDataErrIf(condition, code) if(condition) { return (code); }
 
 #ifndef ALIGNMENT_DOESNT_MATTER
-#define armIsByteAligned(Ptr,N)     ((((int)(Ptr)) % N)==0)
-#define armNotByteAligned(Ptr,N)    ((((int)(Ptr)) % N)!=0)
+#define armIsByteAligned(Ptr,N)     ((((intptr_t)(Ptr)) % N)==0)
+#define armNotByteAligned(Ptr,N)    ((((intptr_t)(Ptr)) % N)!=0)
 #else
 #define armIsByteAligned(Ptr,N)     (1)
 #define armNotByteAligned(Ptr,N)    (0)
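The (int) -> (intptr_t) change matters on LP64 builds, where a pointer no longer fits in a 32-bit int and the old cast could truncate before the modulo, misreporting alignment. A minimal standalone sketch of the same idea (uintptr_t used here; names are hypothetical, not from this tree):

    #include <cstdint>
    #include <cstdio>

    // Alignment check with full pointer width: nothing is truncated on
    // 64-bit targets before taking the remainder.
    static bool isAligned(const void *p, std::uintptr_t n) {
        return (reinterpret_cast<std::uintptr_t>(p) % n) == 0;
    }

    int main() {
        alignas(8) char buf[16];
        std::printf("8-byte aligned: %d\n", isAligned(buf, 8) ? 1 : 0);
        return 0;
    }
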
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/asm_common.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/asm_common.S
index f39f5c4..969a75c 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/asm_common.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/asm_common.S
@@ -31,11 +31,9 @@
     .global \name
 .endif
     .type   \name, %function
-    .func   \name
 \name:
     .endm
 
     .macro endfunction
-    .endfunc
     .endm
 
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdClearMbLayer.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdClearMbLayer.S
index c8a940e..3c2752f 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdClearMbLayer.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdClearMbLayer.S
@@ -16,7 +16,7 @@
 
 #include "asm_common.S"
 
-    preserve8
+    PRESERVE8
 
     .fpu neon
     .text
@@ -29,7 +29,7 @@
 
 /* -- NEON registers -- */
 
-#define qZero   Q0.U8
+#define qZero   Q0
 
 /*------------------------------------------------------------------------------
 
@@ -47,17 +47,17 @@
 
 function h264bsdClearMbLayer, export=1
 
-    VMOV    qZero, #0
+    VMOV.I8 qZero, #0
     ADD     pTmp, pMbLayer, #16
     MOV     step, #32
     SUBS    size, size, #64
 
 loop:
-    VST1    {qZero}, [pMbLayer], step
+    VST1.8  {qZero}, [pMbLayer], step
     SUBS    size, size, #64
-    VST1    {qZero}, [pTmp], step
-    VST1    {qZero}, [pMbLayer], step
-    VST1    {qZero}, [pTmp], step
+    VST1.8  {qZero}, [pTmp], step
+    VST1.8  {qZero}, [pMbLayer], step
+    VST1.8  {qZero}, [pTmp], step
     BCS     loop
 
     BX      lr
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdCountLeadingZeros.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdCountLeadingZeros.S
index 05253d0..b1c9f60 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdCountLeadingZeros.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdCountLeadingZeros.S
@@ -15,7 +15,7 @@
 @
 #include "asm_common.S"
 
-    preserve8
+    PRESERVE8
     .arm
     .text
 
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFillRow7.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFillRow7.S
index 6955b9a..6ed6227 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFillRow7.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFillRow7.S
@@ -16,7 +16,7 @@
 
 #include "asm_common.S"
 
-    preserve8
+    PRESERVE8
 
     .fpu neon
     .text
@@ -33,12 +33,12 @@
 
 /* -- NEON registers -- */
 
-#define qTmp0     Q0.U8
-#define qTmp1     Q1.U8
-#define dTmp0     D0.U8
-#define dTmp1     D1.U8
-#define dTmp2     D2.U8
-#define dTmp3     D3.U8
+#define qTmp0     Q0
+#define qTmp1     Q1
+#define dTmp0     D0
+#define dTmp1     D1
+#define dTmp2     D2
+#define dTmp3     D3
 
 /*
 void h264bsdFillRow7(const u8 * ref, u8 * fill, i32 left, i32 center,
@@ -74,40 +74,40 @@
         B        case_8
 
 case_8:
-        VLD1    {qTmp0, qTmp1}, [ref]!
+        VLD1.8  {qTmp0, qTmp1}, [ref]!
         SUB     center, center, #32
-        VST1    {qTmp0}, [fill]!
-        VST1    {qTmp1}, [fill]!
+        VST1.8  {qTmp0}, [fill]!
+        VST1.8  {qTmp1}, [fill]!
         B       loop_center
 case_7:
-        VLD1    {dTmp0,dTmp1,dTmp2}, [ref]!
+        VLD1.8  {dTmp0,dTmp1,dTmp2}, [ref]!
         SUB     center, center, #28
         LDR     tmp2, [ref], #4
-        VST1    {dTmp0,dTmp1,dTmp2}, [fill]!
+        VST1.8  {dTmp0,dTmp1,dTmp2}, [fill]!
         STR     tmp2, [fill],#4
         B       loop_center
 case_6:
-        VLD1    {dTmp0,dTmp1,dTmp2}, [ref]!
+        VLD1.8  {dTmp0,dTmp1,dTmp2}, [ref]!
         SUB     center, center, #24
-        VST1    {dTmp0,dTmp1,dTmp2}, [fill]!
+        VST1.8  {dTmp0,dTmp1,dTmp2}, [fill]!
         B       loop_center
 case_5:
-        VLD1    {qTmp0}, [ref]!
+        VLD1.8  {qTmp0}, [ref]!
         SUB     center, center, #20
         LDR     tmp2, [ref], #4
-        VST1    {qTmp0}, [fill]!
+        VST1.8  {qTmp0}, [fill]!
         STR     tmp2, [fill],#4
         B       loop_center
 case_4:
-        VLD1    {qTmp0}, [ref]!
+        VLD1.8  {qTmp0}, [ref]!
         SUB     center, center, #16
-        VST1    {qTmp0}, [fill]!
+        VST1.8  {qTmp0}, [fill]!
         B       loop_center
 case_3:
-        VLD1    {dTmp0}, [ref]!
+        VLD1.8  {dTmp0}, [ref]!
         SUB     center, center, #12
         LDR     tmp2, [ref], #4
-        VST1    dTmp0, [fill]!
+        VST1.8  dTmp0, [fill]!
         STR     tmp2, [fill],#4
         B       loop_center
 case_2:
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFlushBits.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFlushBits.S
index b3f3191..aa88471 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFlushBits.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdFlushBits.S
@@ -16,7 +16,7 @@
 
 #include "asm_common.S"
 
-    preserve8
+    PRESERVE8
 
     .arm
     .text
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S
index 495d560..4093b92 100644
--- a/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S
+++ b/media/libstagefright/codecs/on2/h264dec/source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S
@@ -16,8 +16,8 @@
 
 #include "asm_common.S"
 
-    require8
-    preserve8
+    REQUIRE8
+    PRESERVE8
 
     .arm
     .fpu neon
@@ -34,39 +34,39 @@
 
 /* -- NEON registers -- */
 
-#define qRow0     Q0.U8
-#define qRow1     Q1.U8
-#define qRow2     Q2.U8
-#define qRow3     Q3.U8
-#define qRow4     Q4.U8
-#define qRow5     Q5.U8
-#define qRow6     Q6.U8
-#define qRow7     Q7.U8
-#define qRow8     Q8.U8
-#define qRow9     Q9.U8
-#define qRow10    Q10.U8
-#define qRow11    Q11.U8
-#define qRow12    Q12.U8
-#define qRow13    Q13.U8
-#define qRow14    Q14.U8
-#define qRow15    Q15.U8
+#define qRow0     Q0
+#define qRow1     Q1
+#define qRow2     Q2
+#define qRow3     Q3
+#define qRow4     Q4
+#define qRow5     Q5
+#define qRow6     Q6
+#define qRow7     Q7
+#define qRow8     Q8
+#define qRow9     Q9
+#define qRow10    Q10
+#define qRow11    Q11
+#define qRow12    Q12
+#define qRow13    Q13
+#define qRow14    Q14
+#define qRow15    Q15
 
-#define dRow0     D0.U8
-#define dRow1     D1.U8
-#define dRow2     D2.U8
-#define dRow3     D3.U8
-#define dRow4     D4.U8
-#define dRow5     D5.U8
-#define dRow6     D6.U8
-#define dRow7     D7.U8
-#define dRow8     D8.U8
-#define dRow9     D9.U8
-#define dRow10    D10.U8
-#define dRow11    D11.U8
-#define dRow12    D12.U8
-#define dRow13    D13.U8
-#define dRow14    D14.U8
-#define dRow15    D15.U8
+#define dRow0     D0
+#define dRow1     D1
+#define dRow2     D2
+#define dRow3     D3
+#define dRow4     D4
+#define dRow5     D5
+#define dRow6     D6
+#define dRow7     D7
+#define dRow8     D8
+#define dRow9     D9
+#define dRow10    D10
+#define dRow11    D11
+#define dRow12    D12
+#define dRow13    D13
+#define dRow14    D14
+#define dRow15    D15
 
 /*------------------------------------------------------------------------------
 
@@ -99,59 +99,58 @@
 
 
 @   Write luma
-    VLD1    {qRow0, qRow1}, [data]!
+    VLD1.8  {qRow0, qRow1}, [data]!
     LSL     width, width, #4
-    VLD1    {qRow2, qRow3}, [data]!
+    VLD1.8  {qRow2, qRow3}, [data]!
     LSR     cwidth, width, #1
-    VST1    {qRow0}, [luma,:128], width
-    VLD1    {qRow4, qRow5}, [data]!
-    VST1    {qRow1}, [luma,:128], width
-    VLD1    {qRow6, qRow7}, [data]!
-    VST1    {qRow2}, [luma,:128], width
-    VLD1    {qRow8, qRow9}, [data]!
-    VST1    {qRow3}, [luma,:128], width
-    VLD1    {qRow10, qRow11}, [data]!
-    VST1    {qRow4}, [luma,:128], width
-    VLD1    {qRow12, qRow13}, [data]!
-    VST1    {qRow5}, [luma,:128], width
-    VLD1    {qRow14, qRow15}, [data]!
-    VST1    {qRow6}, [luma,:128], width
+    VST1.8  {qRow0}, [luma,:128], width
+    VLD1.8  {qRow4, qRow5}, [data]!
+    VST1.8  {qRow1}, [luma,:128], width
+    VLD1.8  {qRow6, qRow7}, [data]!
+    VST1.8  {qRow2}, [luma,:128], width
+    VLD1.8  {qRow8, qRow9}, [data]!
+    VST1.8  {qRow3}, [luma,:128], width
+    VLD1.8  {qRow10, qRow11}, [data]!
+    VST1.8  {qRow4}, [luma,:128], width
+    VLD1.8  {qRow12, qRow13}, [data]!
+    VST1.8  {qRow5}, [luma,:128], width
+    VLD1.8  {qRow14, qRow15}, [data]!
+    VST1.8  {qRow6}, [luma,:128], width
 
-    VLD1    {qRow0, qRow1}, [data]! ;//cb rows 0,1,2,3
-    VST1    {qRow7}, [luma,:128], width
-    VLD1    {qRow2, qRow3}, [data]! ;//cb rows 4,5,6,7
-    VST1    {qRow8}, [luma,:128], width
-    VLD1    {qRow4, qRow5}, [data]! ;//cr rows 0,1,2,3
-    VST1    {qRow9}, [luma,:128], width
-    VLD1    {qRow6, qRow7}, [data]! ;//cr rows 4,5,6,7
-    VST1    {qRow10}, [luma,:128], width
-    VST1    {dRow0}, [cb,:64], cwidth
-    VST1    {dRow8}, [cr,:64], cwidth
-    VST1    {qRow11}, [luma,:128], width
-    VST1    {dRow1}, [cb,:64], cwidth
-    VST1    {dRow9}, [cr,:64], cwidth
-    VST1    {qRow12}, [luma,:128], width
-    VST1    {dRow2}, [cb,:64], cwidth
-    VST1    {dRow10}, [cr,:64], cwidth
-    VST1    {qRow13}, [luma,:128], width
-    VST1    {dRow3}, [cb,:64], cwidth
-    VST1    {dRow11}, [cr,:64], cwidth
-    VST1    {qRow14}, [luma,:128], width
-    VST1    {dRow4}, [cb,:64], cwidth
-    VST1    {dRow12}, [cr,:64], cwidth
-    VST1    {qRow15}, [luma]
-    VST1    {dRow5}, [cb,:64], cwidth
-    VST1    {dRow13}, [cr,:64], cwidth
-    VST1    {dRow6}, [cb,:64], cwidth
-    VST1    {dRow14}, [cr,:64], cwidth
-    VST1    {dRow7}, [cb,:64]
-    VST1    {dRow15}, [cr,:64]
+    VLD1.8  {qRow0, qRow1}, [data]! ;//cb rows 0,1,2,3
+    VST1.8  {qRow7}, [luma,:128], width
+    VLD1.8  {qRow2, qRow3}, [data]! ;//cb rows 4,5,6,7
+    VST1.8  {qRow8}, [luma,:128], width
+    VLD1.8  {qRow4, qRow5}, [data]! ;//cr rows 0,1,2,3
+    VST1.8  {qRow9}, [luma,:128], width
+    VLD1.8  {qRow6, qRow7}, [data]! ;//cr rows 4,5,6,7
+    VST1.8  {qRow10}, [luma,:128], width
+    VST1.8  {dRow0}, [cb,:64], cwidth
+    VST1.8  {dRow8}, [cr,:64], cwidth
+    VST1.8  {qRow11}, [luma,:128], width
+    VST1.8  {dRow1}, [cb,:64], cwidth
+    VST1.8  {dRow9}, [cr,:64], cwidth
+    VST1.8  {qRow12}, [luma,:128], width
+    VST1.8  {dRow2}, [cb,:64], cwidth
+    VST1.8  {dRow10}, [cr,:64], cwidth
+    VST1.8  {qRow13}, [luma,:128], width
+    VST1.8  {dRow3}, [cb,:64], cwidth
+    VST1.8  {dRow11}, [cr,:64], cwidth
+    VST1.8  {qRow14}, [luma,:128], width
+    VST1.8  {dRow4}, [cb,:64], cwidth
+    VST1.8  {dRow12}, [cr,:64], cwidth
+    VST1.8  {qRow15}, [luma]
+    VST1.8  {dRow5}, [cb,:64], cwidth
+    VST1.8  {dRow13}, [cr,:64], cwidth
+    VST1.8  {dRow6}, [cb,:64], cwidth
+    VST1.8  {dRow14}, [cr,:64], cwidth
+    VST1.8  {dRow7}, [cb,:64]
+    VST1.8  {dRow15}, [cr,:64]
 
     VPOP    {q4-q7}
     POP     {r4-r6,pc}
 @    BX      lr
 
-    .endfunc
 
 
 
diff --git a/media/libstagefright/codecs/opus/dec/Android.mk b/media/libstagefright/codecs/opus/dec/Android.mk
index 2379c5f..f272763 100644
--- a/media/libstagefright/codecs/opus/dec/Android.mk
+++ b/media/libstagefright/codecs/opus/dec/Android.mk
@@ -13,7 +13,10 @@
         libopus libstagefright libstagefright_omx \
         libstagefright_foundation libutils liblog
 
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
+
 LOCAL_MODULE := libstagefright_soft_opusdec
 LOCAL_MODULE_TAGS := optional
 
-include $(BUILD_SHARED_LIBRARY)
\ No newline at end of file
+include $(BUILD_SHARED_LIBRARY)
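LOCAL_SANITIZE here turns on Clang's undefined-behavior checks for integer overflow in the codec. As a rough standalone illustration (not part of this patch) of what signed-integer-overflow traps, compile the following with clang++ -fsanitize=signed-integer-overflow:

    #include <climits>
    #include <cstdio>

    int main() {
        int n = INT_MAX;
        n += 1;  // signed overflow is UB; UBSan aborts with a diagnostic here
        std::printf("%d\n", n);
        return 0;
    }
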
diff --git a/media/libstagefright/codecs/raw/Android.mk b/media/libstagefright/codecs/raw/Android.mk
index 87080e7..e454c84 100644
--- a/media/libstagefright/codecs/raw/Android.mk
+++ b/media/libstagefright/codecs/raw/Android.mk
@@ -9,6 +9,8 @@
         frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
         libstagefright_omx libstagefright_foundation libutils liblog
diff --git a/media/libstagefright/codecs/raw/SoftRaw.cpp b/media/libstagefright/codecs/raw/SoftRaw.cpp
index 4f7ae95..acb2b37 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.cpp
+++ b/media/libstagefright/codecs/raw/SoftRaw.cpp
@@ -42,7 +42,9 @@
     : SimpleSoftOMXComponent(name, callbacks, appData, component),
       mSignalledError(false),
       mChannelCount(2),
-      mSampleRate(44100) {
+      mSampleRate(44100),
+      mNumericalData(OMX_NumericalDataSigned),
+      mBitsPerSample(16) {
     initPorts();
     CHECK_EQ(initDecoder(), (status_t)OK);
 }
@@ -111,10 +113,10 @@
                 return OMX_ErrorUndefined;
             }
 
-            pcmParams->eNumData = OMX_NumericalDataSigned;
+            pcmParams->eNumData = (OMX_NUMERICALDATATYPE)mNumericalData;
             pcmParams->eEndian = OMX_EndianBig;
             pcmParams->bInterleaved = OMX_TRUE;
-            pcmParams->nBitPerSample = 16;
+            pcmParams->nBitPerSample = mBitsPerSample;
             pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
             pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
             pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
@@ -166,12 +168,23 @@
 
             mChannelCount = pcmParams->nChannels;
             mSampleRate = pcmParams->nSamplingRate;
+            mNumericalData = pcmParams->eNumData;
+            mBitsPerSample = pcmParams->nBitPerSample;
 
             return OMX_ErrorNone;
         }
 
         default:
-            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+        {
+            OMX_ERRORTYPE err = SimpleSoftOMXComponent::internalSetParameter(
+                    index, params);
+            // In case inPort->mDef.nBufferSize changed, the output buffer size
+            // should match the input buffer size.
+            PortInfo *inPort = editPortInfo(0);
+            PortInfo *outPort = editPortInfo(1);
+            outPort->mDef.nBufferSize = inPort->mDef.nBufferSize;
+            return err;
+        }
     }
 }
 
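The buffer-size fixup above keeps the pass-through invariant: SoftRaw copies input data verbatim, so an output buffer must be able to hold a full input buffer whenever the input port definition changes. A minimal standalone sketch of that invariant (hypothetical PortDef, not the real OMX structures):

    #include <cstddef>
    #include <cstdio>

    struct PortDef { std::size_t nBufferSize; };

    // After any parameter change, the output port must be able to hold a
    // full input buffer, since the component copies input to output 1:1.
    void syncPassThroughPorts(const PortDef &in, PortDef &out) {
        out.nBufferSize = in.nBufferSize;
    }

    int main() {
        PortDef in{65536}, out{8192};
        syncPassThroughPorts(in, out);
        std::printf("output buffer size: %zu\n", out.nBufferSize);  // 65536
        return 0;
    }
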
diff --git a/media/libstagefright/codecs/raw/SoftRaw.h b/media/libstagefright/codecs/raw/SoftRaw.h
index 015c4a3..80906b4 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.h
+++ b/media/libstagefright/codecs/raw/SoftRaw.h
@@ -50,6 +50,8 @@
 
     int32_t mChannelCount;
     int32_t mSampleRate;
+    int32_t mNumericalData;
+    int32_t mBitsPerSample;
 
     void initPorts();
     status_t initDecoder();
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.mk b/media/libstagefright/codecs/vorbis/dec/Android.mk
index 217a6d2..039be6f 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.mk
+++ b/media/libstagefright/codecs/vorbis/dec/Android.mk
@@ -17,5 +17,7 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 5f4e346..be04e08 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -56,6 +56,7 @@
       mNumFramesLeftOnPage(-1),
       mSawInputEos(false),
       mSignalledOutputEos(false),
+      mSignalledError(false),
       mOutputPortSettingsChange(NONE) {
     initPorts();
     CHECK_EQ(initDecoder(), (status_t)OK);
@@ -83,7 +84,7 @@
     def.eDir = OMX_DirInput;
     def.nBufferCountMin = kNumBuffers;
     def.nBufferCountActual = def.nBufferCountMin;
-    def.nBufferSize = 8192;
+    def.nBufferSize = kMaxNumSamplesPerBuffer * sizeof(int16_t);
     def.bEnabled = OMX_TRUE;
     def.bPopulated = OMX_FALSE;
     def.eDomain = OMX_PortDomainAudio;
@@ -263,7 +264,7 @@
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
 
-    if (mOutputPortSettingsChange != NONE) {
+    if (mSignalledError || mOutputPortSettingsChange != NONE) {
         return;
     }
 
@@ -277,6 +278,7 @@
             ALOGE("Too small input buffer: %zu bytes", size);
             android_errorWriteLog(0x534e4554, "27833616");
             notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+            mSignalledError = true;
             return;
         }
 
@@ -293,9 +295,19 @@
             mVi = new vorbis_info;
             vorbis_info_init(mVi);
 
-            CHECK_EQ(0, _vorbis_unpack_info(mVi, &bits));
+            int ret = _vorbis_unpack_info(mVi, &bits);
+            if (ret != 0) {
+                notify(OMX_EventError, OMX_ErrorUndefined, ret, NULL);
+                mSignalledError = true;
+                return;
+            }
         } else {
-            CHECK_EQ(0, _vorbis_unpack_books(mVi, &bits));
+            int ret = _vorbis_unpack_books(mVi, &bits);
+            if (ret != 0) {
+                notify(OMX_EventError, OMX_ErrorUndefined, ret, NULL);
+                mSignalledError = true;
+                return;
+            }
 
             CHECK(mState == NULL);
             mState = new vorbis_dsp_state;
@@ -333,7 +345,13 @@
             }
 
             if (inHeader->nFilledLen || !mSawInputEos) {
-                CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples));
+                if (inHeader->nFilledLen < sizeof(numPageSamples)) {
+                    notify(OMX_EventError, OMX_ErrorBadParameter, 0, NULL);
+                    mSignalledError = true;
+                    ALOGE("onQueueFilled, input header has nFilledLen %u, expected %zu",
+                            inHeader->nFilledLen, sizeof(numPageSamples));
+                    return;
+                }
                 memcpy(&numPageSamples,
                        inHeader->pBuffer
                         + inHeader->nOffset + inHeader->nFilledLen - 4,
@@ -445,6 +463,9 @@
         // depend on fragments from the last one decoded.
 
         mNumFramesOutput = 0;
+        mSawInputEos = false;
+        mSignalledOutputEos = false;
+        mNumFramesLeftOnPage = -1;
         vorbis_dsp_restart(mState);
     }
 }
@@ -466,6 +487,7 @@
 
     mSawInputEos = false;
     mSignalledOutputEos = false;
+    mSignalledError = false;
     mOutputPortSettingsChange = NONE;
 }
 
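The CHECK_EQ replacements above change the decoder's failure mode on malformed Vorbis headers from aborting the whole process to reporting an OMX error once, latching mSignalledError, and refusing further work. A standalone sketch of that shape (illustrative types, not the OMX API):

    #include <cstdio>

    struct Decoder {
        bool mSignalledError = false;

        void notifyError(int code) { std::printf("OMX error %d\n", code); }

        // Graceful handling: report once, latch the flag, and bail out,
        // instead of a CHECK that would bring down the media process.
        bool handleParseResult(int ret) {
            if (ret != 0) {
                notifyError(ret);
                mSignalledError = true;
                return false;
            }
            return true;
        }
    };

    int main() {
        Decoder d;
        d.handleParseResult(-1);  // malformed header: error reported, no abort
        return 0;
    }
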
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
index 1d00816..30d137b 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.h
@@ -61,6 +61,7 @@
     int32_t mNumFramesLeftOnPage;
     bool mSawInputEos;
     bool mSignalledOutputEos;
+    bool mSignalledError;
 
     enum {
         NONE,
diff --git a/media/libstagefright/colorconversion/Android.mk b/media/libstagefright/colorconversion/Android.mk
index 4f7c48f..32e2dfd 100644
--- a/media/libstagefright/colorconversion/Android.mk
+++ b/media/libstagefright/colorconversion/Android.mk
@@ -11,6 +11,7 @@
 
 LOCAL_CFLAGS += -Werror
 LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE:= libstagefright_color_conversion
 
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index e92c192..bbc4d26 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -363,6 +363,16 @@
         }
     }
 
+    // TODO: propagate color aspects to software renderer to allow better
+    // color conversion to RGB. For now, just mark dataspace for YUV rendering.
+    android_dataspace dataSpace;
+    if (format->findInt32("android._dataspace", (int32_t *)&dataSpace) && dataSpace != mDataSpace) {
+        ALOGD("setting dataspace on output surface to #%x", dataSpace);
+        if ((err = native_window_set_buffers_data_space(mNativeWindow.get(), dataSpace))) {
+            ALOGW("failed to set dataspace on surface (%d)", err);
+        }
+        mDataSpace = dataSpace;
+    }
     if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf, -1)) != 0) {
         ALOGW("Surface::queueBuffer returned error %d", err);
     } else {
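The renderer caches the last value in mDataSpace so native_window_set_buffers_data_space() is only issued when the dataspace actually changes, avoiding a redundant call per frame. A standalone sketch of the cache-and-set pattern (plain ints stand in for the window and dataspace types):

    #include <cstdio>

    struct Renderer {
        int mDataSpace = 0;

        // Only push the value downstream when it differs from the cache.
        void setDataSpace(int dataSpace) {
            if (dataSpace == mDataSpace) return;
            std::printf("setting dataspace to %#x\n", dataSpace);
            mDataSpace = dataSpace;
        }
    };

    int main() {
        Renderer r;
        r.setDataSpace(0x102);  // prints once
        r.setDataSpace(0x102);  // cached: no call
        return 0;
    }
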
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index 81a6d00..b03c769 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -84,6 +84,7 @@
             <Limit name="block-size" value="16x16" />
             <Limit name="blocks-per-second" range="1-244800" />
             <Limit name="bitrate" range="1-12000000" />
+            <Feature name="intra-refresh" />
         </MediaCodec>
         <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
             <!-- profiles and levels:  ProfileCore : Level2 -->
diff --git a/media/libstagefright/filters/GraphicBufferListener.cpp b/media/libstagefright/filters/GraphicBufferListener.cpp
index a606315..c1aaa17 100644
--- a/media/libstagefright/filters/GraphicBufferListener.cpp
+++ b/media/libstagefright/filters/GraphicBufferListener.cpp
@@ -101,11 +101,11 @@
     if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
         // shouldn't happen, since we track num frames available
         ALOGE("frame was not available");
-        item.mBuf = -1;
+        item.mSlot = -1;
         return item;
     } else if (err != OK) {
         ALOGE("acquireBuffer returned err=%d", err);
-        item.mBuf = -1;
+        item.mSlot = -1;
         return item;
     }
 
@@ -119,8 +119,8 @@
     // If this is the first time we're seeing this buffer, add it to our
     // slot table.
     if (item.mGraphicBuffer != NULL) {
-        ALOGV("setting mBufferSlot %d", item.mBuf);
-        mBufferSlot[item.mBuf] = item.mGraphicBuffer;
+        ALOGV("setting mBufferSlot %d", item.mSlot);
+        mBufferSlot[item.mSlot] = item.mGraphicBuffer;
     }
 
     return item;
@@ -128,24 +128,24 @@
 
 sp<GraphicBuffer> GraphicBufferListener::getBuffer(BufferItem item) {
     sp<GraphicBuffer> buf;
-    if (item.mBuf < 0 || item.mBuf >= BufferQueue::NUM_BUFFER_SLOTS) {
-        ALOGE("getBuffer() received invalid BufferItem: mBuf==%d", item.mBuf);
+    if (item.mSlot < 0 || item.mSlot >= BufferQueue::NUM_BUFFER_SLOTS) {
+        ALOGE("getBuffer() received invalid BufferItem: mSlot==%d", item.mSlot);
         return buf;
     }
 
-    buf = mBufferSlot[item.mBuf];
+    buf = mBufferSlot[item.mSlot];
     CHECK(buf.get() != NULL);
 
     return buf;
 }
 
 status_t GraphicBufferListener::releaseBuffer(BufferItem item) {
-    if (item.mBuf < 0 || item.mBuf >= BufferQueue::NUM_BUFFER_SLOTS) {
-        ALOGE("getBuffer() received invalid BufferItem: mBuf==%d", item.mBuf);
+    if (item.mSlot < 0 || item.mSlot >= BufferQueue::NUM_BUFFER_SLOTS) {
+        ALOGE("getBuffer() received invalid BufferItem: mSlot==%d", item.mSlot);
         return ERROR_OUT_OF_RANGE;
     }
 
-    mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
+    mConsumer->releaseBuffer(item.mSlot, item.mFrameNumber,
             EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
 
     return OK;
diff --git a/media/libstagefright/filters/MediaFilter.cpp b/media/libstagefright/filters/MediaFilter.cpp
index 0cf6b06..cd69418 100644
--- a/media/libstagefright/filters/MediaFilter.cpp
+++ b/media/libstagefright/filters/MediaFilter.cpp
@@ -773,7 +773,7 @@
     convertRGBAToARGB(
             (uint8_t*)bufPtr, buf->getWidth(), buf->getHeight(),
             buf->getStride(), inputInfo->mData->data());
-    inputInfo->mBufferID = item.mBuf;
+    inputInfo->mBufferID = item.mSlot;
     inputInfo->mGeneration = mGeneration;
     inputInfo->mOutputFlags = 0;
     inputInfo->mStatus = BufferInfo::OWNED_BY_US;
diff --git a/media/libstagefright/foundation/ABitReader.cpp b/media/libstagefright/foundation/ABitReader.cpp
index beb5cc0..c5db9e6 100644
--- a/media/libstagefright/foundation/ABitReader.cpp
+++ b/media/libstagefright/foundation/ABitReader.cpp
@@ -24,14 +24,18 @@
     : mData(data),
       mSize(size),
       mReservoir(0),
-      mNumBitsLeft(0) {
+      mNumBitsLeft(0),
+      mOverRead(false) {
 }
 
 ABitReader::~ABitReader() {
 }
 
-void ABitReader::fillReservoir() {
-    CHECK_GT(mSize, 0u);
+bool ABitReader::fillReservoir() {
+    if (mSize == 0) {
+        mOverRead = true;
+        return false;
+    }
 
     mReservoir = 0;
     size_t i;
@@ -44,15 +48,32 @@
 
     mNumBitsLeft = 8 * i;
     mReservoir <<= 32 - mNumBitsLeft;
+    return true;
 }
 
 uint32_t ABitReader::getBits(size_t n) {
-    CHECK_LE(n, 32u);
+    uint32_t ret;
+    CHECK(getBitsGraceful(n, &ret));
+    return ret;
+}
+
+uint32_t ABitReader::getBitsWithFallback(size_t n, uint32_t fallback) {
+    uint32_t ret = fallback;
+    (void)getBitsGraceful(n, &ret);
+    return ret;
+}
+
+bool ABitReader::getBitsGraceful(size_t n, uint32_t *out) {
+    if (n > 32) {
+        return false;
+    }
 
     uint32_t result = 0;
     while (n > 0) {
         if (mNumBitsLeft == 0) {
-            fillReservoir();
+            if (!fillReservoir()) {
+                return false;
+            }
         }
 
         size_t m = n;
@@ -67,21 +88,30 @@
         n -= m;
     }
 
-    return result;
+    *out = result;
+    return true;
 }
 
-void ABitReader::skipBits(size_t n) {
+bool ABitReader::skipBits(size_t n) {
+    uint32_t dummy;
     while (n > 32) {
-        getBits(32);
+        if (!getBitsGraceful(32, &dummy)) {
+            return false;
+        }
         n -= 32;
     }
 
     if (n > 0) {
-        getBits(n);
+        return getBitsGraceful(n, &dummy);
     }
+    return true;
 }
 
 void ABitReader::putBits(uint32_t x, size_t n) {
+    if (mOverRead) {
+        return;
+    }
+
     CHECK_LE(n, 32u);
 
     while (mNumBitsLeft + n > 32) {
@@ -114,7 +144,7 @@
         return false;
     }
 
-    ssize_t numBitsRemaining = n - mNumBitsLeft;
+    ssize_t numBitsRemaining = (ssize_t)n - (ssize_t)mNumBitsLeft;
 
     size_t size = mSize;
     const uint8_t *data = mData;
@@ -139,8 +169,11 @@
     return (numBitsRemaining <= 0);
 }
 
-void NALBitReader::fillReservoir() {
-    CHECK_GT(mSize, 0u);
+bool NALBitReader::fillReservoir() {
+    if (mSize == 0) {
+        mOverRead = true;
+        return false;
+    }
 
     mReservoir = 0;
     size_t i = 0;
@@ -165,6 +198,7 @@
 
     mNumBitsLeft = 8 * i;
     mReservoir <<= 32 - mNumBitsLeft;
+    return true;
 }
 
 }  // namespace android
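The reworked ABitReader keeps the aborting getBits() for existing callers, but adds getBitsGraceful(), which reports over-reads to the caller, and getBitsWithFallback(), which substitutes a default. A standalone toy reader mirroring that API shape (not the real class):

    #include <cstdint>
    #include <cstdio>

    struct BitSource {
        const uint8_t *data;
        size_t size;   // bytes
        size_t pos;    // bit position

        // Graceful variant: returns false on over-read, never aborts.
        bool getBitsGraceful(size_t n, uint32_t *out) {
            if (n > 32 || pos + n > size * 8) return false;
            uint32_t v = 0;
            for (size_t i = 0; i < n; ++i, ++pos)
                v = (v << 1) | ((data[pos / 8] >> (7 - pos % 8)) & 1);
            *out = v;
            return true;
        }

        // Fallback variant: substitutes a default on over-read.
        uint32_t getBitsWithFallback(size_t n, uint32_t fallback) {
            uint32_t v = fallback;
            (void)getBitsGraceful(n, &v);
            return v;
        }
    };

    int main() {
        const uint8_t buf[] = { 0xA5 };
        BitSource br{buf, sizeof(buf), 0};
        std::printf("%u\n", br.getBitsWithFallback(4, 0));   // 10 (top nibble)
        std::printf("%u\n", br.getBitsWithFallback(16, 7));  // 7: over-read
        return 0;
    }
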
diff --git a/media/libstagefright/foundation/ABuffer.cpp b/media/libstagefright/foundation/ABuffer.cpp
index a5b81a8..804046a 100644
--- a/media/libstagefright/foundation/ABuffer.cpp
+++ b/media/libstagefright/foundation/ABuffer.cpp
@@ -67,10 +67,6 @@
         }
     }
 
-    if (mFarewell != NULL) {
-        mFarewell->post();
-    }
-
     setMediaBufferBase(NULL);
 }
 
@@ -82,10 +78,6 @@
     mRangeLength = size;
 }
 
-void ABuffer::setFarewellMessage(const sp<AMessage> msg) {
-    mFarewell = msg;
-}
-
 sp<AMessage> ABuffer::meta() {
     if (mMeta == NULL) {
         mMeta = new AMessage;
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 725a574..37fb33f 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -595,7 +595,7 @@
 }
 
 // static
-sp<AMessage> AMessage::FromParcel(const Parcel &parcel) {
+sp<AMessage> AMessage::FromParcel(const Parcel &parcel, size_t maxNestingLevel) {
     int32_t what = parcel.readInt32();
     sp<AMessage> msg = new AMessage();
     msg->setWhat(what);
@@ -667,7 +667,19 @@
 
             case kTypeMessage:
             {
-                sp<AMessage> subMsg = AMessage::FromParcel(parcel);
+                if (maxNestingLevel == 0) {
+                    ALOGE("Too many levels of AMessage nesting.");
+                    return NULL;
+                }
+                sp<AMessage> subMsg = AMessage::FromParcel(
+                        parcel,
+                        maxNestingLevel - 1);
+                if (subMsg == NULL) {
+                    // NULL is returned when the parcel contains an object
+                    // that cannot cross process boundaries or when the
+                    // AMessage nesting is too deep.
+                    return NULL;
+                }
                 subMsg->incStrong(msg.get());
 
                 item->u.refValue = subMsg.get();
@@ -677,7 +689,7 @@
             default:
             {
                 ALOGE("This type of object cannot cross process boundaries.");
-                TRESPASS();
+                return NULL;
             }
         }
 
@@ -749,6 +761,126 @@
     }
 }
 
+sp<AMessage> AMessage::changesFrom(const sp<const AMessage> &other, bool deep) const {
+    if (other == NULL) {
+        return const_cast<AMessage*>(this);
+    }
+
+    sp<AMessage> diff = new AMessage;
+    if (mWhat != other->mWhat) {
+        diff->setWhat(mWhat);
+    }
+    if (mHandler != other->mHandler) {
+        diff->setTarget(mHandler.promote());
+    }
+
+    for (size_t i = 0; i < mNumItems; ++i) {
+        const Item &item = mItems[i];
+        const Item *oitem = other->findItem(item.mName, item.mType);
+        switch (item.mType) {
+            case kTypeInt32:
+                if (oitem == NULL || item.u.int32Value != oitem->u.int32Value) {
+                    diff->setInt32(item.mName, item.u.int32Value);
+                }
+                break;
+
+            case kTypeInt64:
+                if (oitem == NULL || item.u.int64Value != oitem->u.int64Value) {
+                    diff->setInt64(item.mName, item.u.int64Value);
+                }
+                break;
+
+            case kTypeSize:
+                if (oitem == NULL || item.u.sizeValue != oitem->u.sizeValue) {
+                    diff->setSize(item.mName, item.u.sizeValue);
+                }
+                break;
+
+            case kTypeFloat:
+                if (oitem == NULL || item.u.floatValue != oitem->u.floatValue) {
+                    diff->setFloat(item.mName, item.u.floatValue);
+                }
+                break;
+
+            case kTypeDouble:
+                if (oitem == NULL || item.u.doubleValue != oitem->u.doubleValue) {
+                    diff->setDouble(item.mName, item.u.doubleValue);
+                }
+                break;
+
+            case kTypeString:
+                if (oitem == NULL || *item.u.stringValue != *oitem->u.stringValue) {
+                    diff->setString(item.mName, *item.u.stringValue);
+                }
+                break;
+
+            case kTypeRect:
+                if (oitem == NULL || memcmp(&item.u.rectValue, &oitem->u.rectValue, sizeof(Rect))) {
+                    diff->setRect(
+                            item.mName, item.u.rectValue.mLeft, item.u.rectValue.mTop,
+                            item.u.rectValue.mRight, item.u.rectValue.mBottom);
+                }
+                break;
+
+            case kTypePointer:
+                if (oitem == NULL || item.u.ptrValue != oitem->u.ptrValue) {
+                    diff->setPointer(item.mName, item.u.ptrValue);
+                }
+                break;
+
+            case kTypeBuffer:
+            {
+                sp<ABuffer> myBuf = static_cast<ABuffer *>(item.u.refValue);
+                if (myBuf == NULL) {
+                    if (oitem == NULL || oitem->u.refValue != NULL) {
+                        diff->setBuffer(item.mName, NULL);
+                    }
+                    break;
+                }
+                sp<ABuffer> oBuf = oitem == NULL ? NULL : static_cast<ABuffer *>(oitem->u.refValue);
+                if (oBuf == NULL
+                        || myBuf->size() != oBuf->size()
+                        || (!myBuf->data() ^ !oBuf->data()) // data nullness differs
+                        || (myBuf->data() && memcmp(myBuf->data(), oBuf->data(), myBuf->size()))) {
+                    diff->setBuffer(item.mName, myBuf);
+                }
+                break;
+            }
+
+            case kTypeMessage:
+            {
+                sp<AMessage> myMsg = static_cast<AMessage *>(item.u.refValue);
+                if (myMsg == NULL) {
+                    if (oitem == NULL || oitem->u.refValue != NULL) {
+                        diff->setMessage(item.mName, NULL);
+                    }
+                    break;
+                }
+                sp<AMessage> oMsg =
+                    oitem == NULL ? NULL : static_cast<AMessage *>(oitem->u.refValue);
+                sp<AMessage> changes = myMsg->changesFrom(oMsg, deep);
+                if (changes->countEntries()) {
+                    diff->setMessage(item.mName, deep ? changes : myMsg);
+                }
+                break;
+            }
+
+            case kTypeObject:
+                if (oitem == NULL || item.u.refValue != oitem->u.refValue) {
+                    diff->setObject(item.mName, item.u.refValue);
+                }
+                break;
+
+            default:
+            {
+                ALOGE("Unknown type %d", item.mType);
+                TRESPASS();
+            }
+        }
+    }
+    return diff;
+}
+
 size_t AMessage::countEntries() const {
     return mNumItems;
 }
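FromParcel() now carries a maxNestingLevel budget so a hostile parcel full of nested kTypeMessage entries cannot recurse until the stack overflows: each level passes budget - 1, and a NULL result propagates up instead of a TRESPASS() abort. The same pattern on a toy tree (standalone sketch):

    #include <cstddef>
    #include <cstdio>

    struct Node { Node *child; };

    // Depth-limited recursion: reject input nested deeper than the budget
    // instead of recursing until the stack is exhausted.
    bool validate(const Node *n, size_t maxNestingLevel) {
        if (n == nullptr) return true;
        if (maxNestingLevel == 0) return false;  // too deep: reject
        return validate(n->child, maxNestingLevel - 1);
    }

    int main() {
        Node leaf{nullptr}, root{&leaf};
        std::printf("%d\n", validate(&root, 255) ? 1 : 0);  // 1: within budget
        std::printf("%d\n", validate(&root, 1) ? 1 : 0);    // 0: budget hit
        return 0;
    }
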
diff --git a/media/libstagefright/foundation/ANetworkSession.cpp b/media/libstagefright/foundation/ANetworkSession.cpp
index b230400..46314eb 100644
--- a/media/libstagefright/foundation/ANetworkSession.cpp
+++ b/media/libstagefright/foundation/ANetworkSession.cpp
@@ -457,7 +457,7 @@
         while (mInBuffer.size() >= 2) {
             size_t offset = 2;
 
-            unsigned payloadLen = data[1] & 0x7f;
+            uint64_t payloadLen = data[1] & 0x7f;
             if (payloadLen == 126) {
                 if (offset + 2 > mInBuffer.size()) {
                     break;
@@ -485,7 +485,7 @@
                 offset += 4;
             }
 
-            if (offset + payloadLen > mInBuffer.size()) {
+            if (payloadLen > mInBuffer.size() || offset > mInBuffer.size() - payloadLen) {
                 break;
             }
 
@@ -1318,7 +1318,8 @@
 
         List<sp<Session> > sessionsToAdd;
 
-        for (size_t i = mSessions.size(); res > 0 && i-- > 0;) {
+        for (size_t i = mSessions.size(); res > 0 && i > 0;) {
+            i--;
             const sp<Session> &session = mSessions.valueAt(i);
 
             int s = session->socket();
@@ -1409,4 +1410,3 @@
 }
 
 }  // namespace android
-
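The rewritten length check above avoids unsigned wraparound: payloadLen is now a uint64_t taken from untrusted framing bytes, and offset + payloadLen can overflow and compare as "small". Testing payloadLen against the buffer size first makes the subtraction safe. A standalone sketch of the overflow-safe form:

    #include <cstdint>
    #include <cstdio>

    // Overflow-safe "does [offset, offset + len) fit in size?" for an
    // untrusted len: never form offset + len directly.
    bool fits(uint64_t offset, uint64_t len, uint64_t size) {
        return len <= size && offset <= size - len;
    }

    int main() {
        // A naive offset + len would wrap to 0 here and wrongly pass.
        std::printf("%d\n", fits(2, UINT64_MAX - 1, 16) ? 1 : 0);  // 0
        std::printf("%d\n", fits(2, 8, 16) ? 1 : 0);               // 1
        return 0;
    }
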
diff --git a/media/libstagefright/foundation/Android.mk b/media/libstagefright/foundation/Android.mk
index c68264c..3c3ed59 100644
--- a/media/libstagefright/foundation/Android.mk
+++ b/media/libstagefright/foundation/Android.mk
@@ -15,6 +15,10 @@
     AString.cpp                   \
     AStringUtils.cpp              \
     AWakeLock.cpp                 \
+    ColorUtils.cpp                \
+    MediaBuffer.cpp               \
+    MediaBufferGroup.cpp          \
+    MetaData.cpp                  \
     ParsedMessage.cpp             \
     base64.cpp                    \
     hexdump.cpp
@@ -31,6 +35,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_MODULE:= libstagefright_foundation
 
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
new file mode 100644
index 0000000..e329766
--- /dev/null
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -0,0 +1,686 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ColorUtils"
+
+#include <inttypes.h>
+#include <arpa/inet.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALookup.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+
+namespace android {
+
+// shortcut names for brevity in the following tables
+typedef ColorAspects CA;
+typedef ColorUtils CU;
+
+#define HI_UINT16(a) (((a) >> 8) & 0xFF)
+#define LO_UINT16(a) ((a) & 0xFF)
+
+const static
+ALookup<CU::ColorRange, CA::Range> sRanges{
+    {
+        { CU::kColorRangeLimited, CA::RangeLimited },
+        { CU::kColorRangeFull, CA::RangeFull },
+        { CU::kColorRangeUnspecified, CA::RangeUnspecified },
+    }
+};
+
+const static
+ALookup<CU::ColorStandard, std::pair<CA::Primaries, CA::MatrixCoeffs>> sStandards {
+    {
+        { CU::kColorStandardUnspecified,    { CA::PrimariesUnspecified, CA::MatrixUnspecified } },
+        { CU::kColorStandardBT709,          { CA::PrimariesBT709_5, CA::MatrixBT709_5 } },
+        { CU::kColorStandardBT601_625,      { CA::PrimariesBT601_6_625, CA::MatrixBT601_6 } },
+        { CU::kColorStandardBT601_625_Unadjusted,
+                                            // this is a really close match
+                                            { CA::PrimariesBT601_6_625, CA::MatrixBT709_5 } },
+        { CU::kColorStandardBT601_525,      { CA::PrimariesBT601_6_525, CA::MatrixBT601_6 } },
+        { CU::kColorStandardBT601_525_Unadjusted,
+                                            { CA::PrimariesBT601_6_525, CA::MatrixSMPTE240M } },
+        { CU::kColorStandardBT2020,         { CA::PrimariesBT2020, CA::MatrixBT2020 } },
+        { CU::kColorStandardBT2020Constant, { CA::PrimariesBT2020, CA::MatrixBT2020Constant } },
+        { CU::kColorStandardBT470M,         { CA::PrimariesBT470_6M, CA::MatrixBT470_6M } },
+        // NOTE: there is no close match to the matrix used by standard film, so the closest one was chosen
+        { CU::kColorStandardFilm,           { CA::PrimariesGenericFilm, CA::MatrixBT2020 } },
+    }
+};
+
+const static
+ALookup<CU::ColorTransfer, CA::Transfer> sTransfers{
+    {
+        { CU::kColorTransferUnspecified,    CA::TransferUnspecified },
+        { CU::kColorTransferLinear,         CA::TransferLinear },
+        { CU::kColorTransferSRGB,           CA::TransferSRGB },
+        { CU::kColorTransferSMPTE_170M,     CA::TransferSMPTE170M },
+        { CU::kColorTransferGamma22,        CA::TransferGamma22 },
+        { CU::kColorTransferGamma28,        CA::TransferGamma28 },
+        { CU::kColorTransferST2084,         CA::TransferST2084 },
+        { CU::kColorTransferHLG,            CA::TransferHLG },
+    }
+};
+
+static bool isValid(ColorAspects::Primaries p) {
+    return p <= ColorAspects::PrimariesOther;
+}
+
+static bool isDefined(ColorAspects::Primaries p) {
+    return p <= ColorAspects::PrimariesBT2020;
+}
+
+static bool isValid(ColorAspects::MatrixCoeffs c) {
+    return c <= ColorAspects::MatrixOther;
+}
+
+static bool isDefined(ColorAspects::MatrixCoeffs c) {
+    return c <= ColorAspects::MatrixBT2020Constant;
+}
+
+// static
+int32_t ColorUtils::wrapColorAspectsIntoColorStandard(
+        ColorAspects::Primaries primaries, ColorAspects::MatrixCoeffs coeffs) {
+    ColorStandard res;
+    if (sStandards.map(std::make_pair(primaries, coeffs), &res)) {
+        return res;
+    } else if (!isValid(primaries) || !isValid(coeffs)) {
+        return kColorStandardUnspecified;
+    }
+
+    // no direct match: pack the pair into the extended numbering space when
+    // both values are platform-defined, otherwise into the vendor space
+    uint32_t numPrimaries = ColorAspects::PrimariesBT2020 + 1;
+    if (isDefined(primaries) && isDefined(coeffs)) {
+        return kColorStandardExtendedStart + primaries + coeffs * numPrimaries;
+    } else {
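+        // the vendor range packs with a 0x100 stride so that any value up to
+        // PrimariesOther / MatrixOther (0xff) fits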
+        return kColorStandardVendorStart + primaries + coeffs * 0x100;
+    }
+}
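+
+// Example of the packing scheme: a (primaries, coeffs) pair with no sStandards
+// entry but with both members platform-defined packs to
+//     kColorStandardExtendedStart + primaries + coeffs * numPrimaries,
+// which unwrapColorAspectsFromColorStandard() below inverts with a modulo and
+// a division by numPrimaries.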
+
+// static
+status_t ColorUtils::unwrapColorAspectsFromColorStandard(
+        int32_t standard,
+        ColorAspects::Primaries *primaries, ColorAspects::MatrixCoeffs *coeffs) {
+    std::pair<ColorAspects::Primaries, ColorAspects::MatrixCoeffs> res;
+    if (sStandards.map((ColorStandard)standard, &res)) {
+        *primaries = res.first;
+        *coeffs = res.second;
+        return OK;
+    }
+
+    int32_t start = kColorStandardExtendedStart;
+    int32_t numPrimaries = ColorAspects::PrimariesBT2020 + 1;
+    int32_t numCoeffs = ColorAspects::MatrixBT2020Constant + 1;
+    if (standard >= (int32_t)kColorStandardVendorStart) {
+        start = kColorStandardVendorStart;
+        numPrimaries = ColorAspects::PrimariesOther + 1; // 0x100
+        numCoeffs = ColorAspects::MatrixOther + 1; // 0x100
+    }
+    if (standard >= start && standard < start + numPrimaries * numCoeffs) {
+        int32_t product = standard - start;
+        *primaries = (ColorAspects::Primaries)(product % numPrimaries);
+        *coeffs = (ColorAspects::MatrixCoeffs)(product / numPrimaries);
+        return OK;
+    }
+    *primaries = ColorAspects::PrimariesOther;
+    *coeffs = ColorAspects::MatrixOther;
+    return BAD_VALUE;
+}
+
+static bool isValid(ColorAspects::Range r) {
+    return r <= ColorAspects::RangeOther;
+}
+
+static bool isDefined(ColorAspects::Range r) {
+    return r <= ColorAspects::RangeLimited;
+}
+
+// static
+int32_t ColorUtils::wrapColorAspectsIntoColorRange(ColorAspects::Range range) {
+    ColorRange res;
+    if (sRanges.map(range, &res)) {
+        return res;
+    } else if (!isValid(range)) {
+        return kColorRangeUnspecified;
+    } else {
+        CHECK(!isDefined(range));
+        // all platform values are in sRanges
+        return kColorRangeVendorStart + range;
+    }
+}
+
+// static
+status_t ColorUtils::unwrapColorAspectsFromColorRange(
+        int32_t range, ColorAspects::Range *aspect) {
+    if (sRanges.map((ColorRange)range, aspect)) {
+        return OK;
+    }
+
+    int32_t start = kColorRangeVendorStart;
+    int32_t numRanges = ColorAspects::RangeOther + 1; // 0x100
+    if (range >= start && range < start + numRanges) {
+        *aspect = (ColorAspects::Range)(range - start);
+        return OK;
+    }
+    *aspect = ColorAspects::RangeOther;
+    return BAD_VALUE;
+}
+
+static bool isValid(ColorAspects::Transfer t) {
+    return t <= ColorAspects::TransferOther;
+}
+
+static bool isDefined(ColorAspects::Transfer t) {
+    return t <= ColorAspects::TransferHLG
+            || (t >= ColorAspects::TransferSMPTE240M && t <= ColorAspects::TransferST428);
+}
+
+// static
+int32_t ColorUtils::wrapColorAspectsIntoColorTransfer(
+        ColorAspects::Transfer transfer) {
+    ColorTransfer res;
+    if (sTransfers.map(transfer, &res)) {
+        return res;
+    } else if (!isValid(transfer)) {
+        return kColorTransferUnspecified;
+    } else if (isDefined(transfer)) {
+        return kColorTransferExtendedStart + transfer;
+    } else {
+        // all platform values are in sTransfers
+        return kColorTransferVendorStart + transfer;
+    }
+}
+
+// static
+status_t ColorUtils::unwrapColorAspectsFromColorTransfer(
+        int32_t transfer, ColorAspects::Transfer *aspect) {
+    if (sTransfers.map((ColorTransfer)transfer, aspect)) {
+        return OK;
+    }
+
+    int32_t start = kColorTransferExtendedStart;
+    int32_t numTransfers = ColorAspects::TransferST428 + 1;
+    if (transfer >= (int32_t)kColorTransferVendorStart) {
+        start = kColorTransferVendorStart;
+        numTransfers = ColorAspects::TransferOther + 1; // 0x100
+    }
+    if (transfer >= start && transfer < start + numTransfers) {
+        *aspect = (ColorAspects::Transfer)(transfer - start);
+        return OK;
+    }
+    *aspect = ColorAspects::TransferOther;
+    return BAD_VALUE;
+}
+
+// static
+status_t ColorUtils::convertPlatformColorAspectsToCodecAspects(
+    int32_t range, int32_t standard, int32_t transfer, ColorAspects &aspects) {
+    status_t res1 = unwrapColorAspectsFromColorRange(range, &aspects.mRange);
+    status_t res2 = unwrapColorAspectsFromColorStandard(
+            standard, &aspects.mPrimaries, &aspects.mMatrixCoeffs);
+    status_t res3 = unwrapColorAspectsFromColorTransfer(transfer, &aspects.mTransfer);
+    return res1 != OK ? res1 : (res2 != OK ? res2 : res3);
+}
+
+// static
+status_t ColorUtils::convertCodecColorAspectsToPlatformAspects(
+    const ColorAspects &aspects, int32_t *range, int32_t *standard, int32_t *transfer) {
+    *range = wrapColorAspectsIntoColorRange(aspects.mRange);
+    *standard = wrapColorAspectsIntoColorStandard(aspects.mPrimaries, aspects.mMatrixCoeffs);
+    *transfer = wrapColorAspectsIntoColorTransfer(aspects.mTransfer);
+    if (isValid(aspects.mRange) && isValid(aspects.mPrimaries)
+            && isValid(aspects.mMatrixCoeffs) && isValid(aspects.mTransfer)) {
+        return OK;
+    } else {
+        return BAD_VALUE;
+    }
+}
+
+const static
+ALookup<int32_t, ColorAspects::Primaries> sIsoPrimaries {
+    {
+        { 1, ColorAspects::PrimariesBT709_5 },
+        { 2, ColorAspects::PrimariesUnspecified },
+        { 4, ColorAspects::PrimariesBT470_6M },
+        { 5, ColorAspects::PrimariesBT601_6_625 },
+        { 6, ColorAspects::PrimariesBT601_6_525 /* main */},
+        { 7, ColorAspects::PrimariesBT601_6_525 },
+        // -- ITU T.832 201201 ends here
+        { 8, ColorAspects::PrimariesGenericFilm },
+        { 9, ColorAspects::PrimariesBT2020 },
+        { 10, ColorAspects::PrimariesOther /* XYZ */ },
+    }
+};
+
+const static
+ALookup<int32_t, ColorAspects::Transfer> sIsoTransfers {
+    {
+        { 1, ColorAspects::TransferSMPTE170M /* main */},
+        { 2, ColorAspects::TransferUnspecified },
+        { 4, ColorAspects::TransferGamma22 },
+        { 5, ColorAspects::TransferGamma28 },
+        { 6, ColorAspects::TransferSMPTE170M },
+        { 7, ColorAspects::TransferSMPTE240M },
+        { 8, ColorAspects::TransferLinear },
+        { 9, ColorAspects::TransferOther /* log 100:1 */ },
+        { 10, ColorAspects::TransferOther /* log 316:1 */ },
+        { 11, ColorAspects::TransferXvYCC },
+        { 12, ColorAspects::TransferBT1361 },
+        { 13, ColorAspects::TransferSRGB },
+        // -- ITU T.832 201201 ends here
+        { 14, ColorAspects::TransferSMPTE170M },
+        { 15, ColorAspects::TransferSMPTE170M },
+        { 16, ColorAspects::TransferST2084 },
+        { 17, ColorAspects::TransferST428 },
+    }
+};
+
+const static
+ALookup<int32_t, ColorAspects::MatrixCoeffs> sIsoMatrixCoeffs {
+    {
+        { 0, ColorAspects::MatrixOther },
+        { 1, ColorAspects::MatrixBT709_5 },
+        { 2, ColorAspects::MatrixUnspecified },
+        { 4, ColorAspects::MatrixBT470_6M },
+        { 6, ColorAspects::MatrixBT601_6 /* main */ },
+        { 5, ColorAspects::MatrixBT601_6 },
+        { 7, ColorAspects::MatrixSMPTE240M },
+        { 8, ColorAspects::MatrixOther /* YCgCo */ },
+        // -- ITU T.832 201201 ends here
+        { 9, ColorAspects::MatrixBT2020 },
+        { 10, ColorAspects::MatrixBT2020Constant },
+    }
+};
+
+// static
+void ColorUtils::convertCodecColorAspectsToIsoAspects(
+        const ColorAspects &aspects,
+        int32_t *primaries, int32_t *transfer, int32_t *coeffs, bool *fullRange) {
+    if (aspects.mPrimaries == ColorAspects::PrimariesOther ||
+            !sIsoPrimaries.map(aspects.mPrimaries, primaries)) {
+        CHECK(sIsoPrimaries.map(ColorAspects::PrimariesUnspecified, primaries));
+    }
+    if (aspects.mTransfer == ColorAspects::TransferOther ||
+            !sIsoTransfers.map(aspects.mTransfer, transfer)) {
+        CHECK(sIsoTransfers.map(ColorAspects::TransferUnspecified, transfer));
+    }
+    if (aspects.mMatrixCoeffs == ColorAspects::MatrixOther ||
+            !sIsoMatrixCoeffs.map(aspects.mMatrixCoeffs, coeffs)) {
+        CHECK(sIsoMatrixCoeffs.map(ColorAspects::MatrixUnspecified, coeffs));
+    }
+    *fullRange = aspects.mRange == ColorAspects::RangeFull;
+}
+
+// static
+void ColorUtils::convertIsoColorAspectsToCodecAspects(
+        int32_t primaries, int32_t transfer, int32_t coeffs, bool fullRange,
+        ColorAspects &aspects) {
+    if (!sIsoPrimaries.map(primaries, &aspects.mPrimaries)) {
+        aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+    }
+    if (!sIsoTransfers.map(transfer, &aspects.mTransfer)) {
+        aspects.mTransfer = ColorAspects::TransferUnspecified;
+    }
+    if (!sIsoMatrixCoeffs.map(coeffs, &aspects.mMatrixCoeffs)) {
+        aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+    }
+    aspects.mRange = fullRange ? ColorAspects::RangeFull : ColorAspects::RangeLimited;
+}
+
+// static
+void ColorUtils::setDefaultCodecColorAspectsIfNeeded(
+        ColorAspects &aspects, int32_t width, int32_t height) {
+    ColorAspects::MatrixCoeffs coeffs;
+    ColorAspects::Primaries primaries;
+
+    // Default to BT2020, BT709 or BT601 based on size. Allow 2.35:1 aspect ratio. Limit BT601
+    // to PAL or smaller, BT2020 to 4K or larger, leaving BT709 for all resolutions in between.
+    if (width >= 3840 || height >= 3840 || width * (int64_t)height >= 3840 * 1634) {
+        primaries = ColorAspects::PrimariesBT2020;
+        coeffs = ColorAspects::MatrixBT2020;
+    } else if ((width <= 720 && height > 480 && height <= 576)
+            || (height <= 720 && width > 480 && width <= 576)) {
+        primaries = ColorAspects::PrimariesBT601_6_625;
+        coeffs = ColorAspects::MatrixBT601_6;
+    } else if ((width <= 720 && height <= 480) || (height <= 720 && width <= 480)) {
+        primaries = ColorAspects::PrimariesBT601_6_525;
+        coeffs = ColorAspects::MatrixBT601_6;
+    } else {
+        primaries = ColorAspects::PrimariesBT709_5;
+        coeffs = ColorAspects::MatrixBT709_5;
+    }
+
+    if (aspects.mRange == ColorAspects::RangeUnspecified) {
+        aspects.mRange = ColorAspects::RangeLimited;
+    }
+
+    if (aspects.mPrimaries == ColorAspects::PrimariesUnspecified) {
+        aspects.mPrimaries = primaries;
+    }
+    if (aspects.mMatrixCoeffs == ColorAspects::MatrixUnspecified) {
+        aspects.mMatrixCoeffs = coeffs;
+    }
+    if (aspects.mTransfer == ColorAspects::TransferUnspecified) {
+        aspects.mTransfer = ColorAspects::TransferSMPTE170M;
+    }
+}
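+
+// Example: a 1920x1080 stream with fully unspecified aspects is defaulted
+// above to RangeLimited, PrimariesBT709_5, MatrixBT709_5 and
+// TransferSMPTE170M.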
+
+// TODO: move this into a Video HAL
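+// These tables coerce near-miss (primaries, coeffs) pairs to the closest
+// defined dataspace standard when no exact sStandards entry exists.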
+ALookup<CU::ColorStandard, std::pair<CA::Primaries, CA::MatrixCoeffs>> sStandardFallbacks {
+    {
+        { CU::kColorStandardBT601_625, { CA::PrimariesBT709_5, CA::MatrixBT470_6M } },
+        { CU::kColorStandardBT601_625, { CA::PrimariesBT709_5, CA::MatrixBT601_6 } },
+        { CU::kColorStandardBT709,     { CA::PrimariesBT709_5, CA::MatrixSMPTE240M } },
+        { CU::kColorStandardBT709,     { CA::PrimariesBT709_5, CA::MatrixBT2020 } },
+        { CU::kColorStandardBT601_525, { CA::PrimariesBT709_5, CA::MatrixBT2020Constant } },
+
+        { CU::kColorStandardBT2020Constant,
+                                       { CA::PrimariesBT470_6M, CA::MatrixBT2020Constant } },
+
+        { CU::kColorStandardBT601_625, { CA::PrimariesBT601_6_625, CA::MatrixBT470_6M } },
+        { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_625, CA::MatrixBT2020Constant } },
+
+        { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_525, CA::MatrixBT470_6M } },
+        { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_525, CA::MatrixBT2020Constant } },
+
+        { CU::kColorStandardBT2020Constant,
+                                       { CA::PrimariesGenericFilm, CA::MatrixBT2020Constant } },
+    }
+};
+
+ALookup<CU::ColorStandard, CA::Primaries> sStandardPrimariesFallbacks {
+    {
+        { CU::kColorStandardFilm,                 CA::PrimariesGenericFilm },
+        { CU::kColorStandardBT470M,               CA::PrimariesBT470_6M },
+        { CU::kColorStandardBT2020,               CA::PrimariesBT2020 },
+        { CU::kColorStandardBT601_525_Unadjusted, CA::PrimariesBT601_6_525 },
+        { CU::kColorStandardBT601_625_Unadjusted, CA::PrimariesBT601_6_625 },
+    }
+};
+
+static ALookup<android_dataspace, android_dataspace> sLegacyDataSpaceToV0 {
+    {
+        { HAL_DATASPACE_SRGB, HAL_DATASPACE_V0_SRGB },
+        { HAL_DATASPACE_BT709, HAL_DATASPACE_V0_BT709 },
+        { HAL_DATASPACE_SRGB_LINEAR, HAL_DATASPACE_V0_SRGB_LINEAR },
+        { HAL_DATASPACE_BT601_525, HAL_DATASPACE_V0_BT601_525 },
+        { HAL_DATASPACE_BT601_625, HAL_DATASPACE_V0_BT601_625 },
+        { HAL_DATASPACE_JFIF, HAL_DATASPACE_V0_JFIF },
+    }
+};
+
+bool ColorUtils::convertDataSpaceToV0(android_dataspace &dataSpace) {
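+    // maps a legacy dataspace to its V0 equivalent in place, then reports
+    // whether the result uses only the V0 standard/transfer/range bit-fields
+    // (i.e. the legacy low 16 bits and the top two bits are clear)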
+    (void)sLegacyDataSpaceToV0.lookup(dataSpace, &dataSpace);
+    return (dataSpace & 0xC000FFFF) == 0;
+}
+
+bool ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+        ColorAspects &aspects, const ColorAspects &orig, bool usePlatformAspects) {
+    // remove changed aspects (change them to Unspecified)
+    bool changed = false;
+    if (aspects.mRange && aspects.mRange != orig.mRange) {
+        aspects.mRange = ColorAspects::RangeUnspecified;
+        changed = true;
+    }
+    if (aspects.mPrimaries && aspects.mPrimaries != orig.mPrimaries) {
+        aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+        if (usePlatformAspects) {
+            aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+        }
+        changed = true;
+    }
+    if (aspects.mMatrixCoeffs && aspects.mMatrixCoeffs != orig.mMatrixCoeffs) {
+        aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+        if (usePlatformAspects) {
+            aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+        }
+        changed = true;
+    }
+    if (aspects.mTransfer && aspects.mTransfer != orig.mTransfer) {
+        aspects.mTransfer = ColorAspects::TransferUnspecified;
+        changed = true;
+    }
+    return changed;
+}
+
+// static
+android_dataspace ColorUtils::getDataSpaceForColorAspects(ColorAspects &aspects, bool mayExpand) {
+    // This platform implementation never expands color space (e.g. returns an expanded
+    // dataspace to use where the codec does in-the-background color space conversion)
+    mayExpand = false;
+
+    if (aspects.mRange == ColorAspects::RangeUnspecified
+            || aspects.mPrimaries == ColorAspects::PrimariesUnspecified
+            || aspects.mMatrixCoeffs == ColorAspects::MatrixUnspecified
+            || aspects.mTransfer == ColorAspects::TransferUnspecified) {
+        ALOGW("expected specified color aspects (%u:%u:%u:%u)",
+                aspects.mRange, aspects.mPrimaries, aspects.mMatrixCoeffs, aspects.mTransfer);
+    }
+
+    // default to video range and transfer
+    ColorRange range = kColorRangeLimited;
+    ColorTransfer transfer = kColorTransferSMPTE_170M;
+    (void)sRanges.map(aspects.mRange, &range);
+    (void)sTransfers.map(aspects.mTransfer, &transfer);
+
+    ColorStandard standard = kColorStandardBT709;
+    auto pair = std::make_pair(aspects.mPrimaries, aspects.mMatrixCoeffs);
+    if (!sStandards.map(pair, &standard)) {
+        if (!sStandardFallbacks.map(pair, &standard)) {
+            (void)sStandardPrimariesFallbacks.map(aspects.mPrimaries, &standard);
+
+            if (aspects.mMatrixCoeffs == ColorAspects::MatrixBT2020Constant) {
+                range = kColorRangeFull;
+            }
+        }
+    }
+
+    android_dataspace dataSpace = (android_dataspace)(
+            (range << HAL_DATASPACE_RANGE_SHIFT) | (standard << HAL_DATASPACE_STANDARD_SHIFT) |
+            (transfer << HAL_DATASPACE_TRANSFER_SHIFT));
+    (void)sLegacyDataSpaceToV0.rlookup(dataSpace, &dataSpace);
+
+    if (!mayExpand) {
+        // update codec aspects based on dataspace
+        convertPlatformColorAspectsToCodecAspects(range, standard, transfer, aspects);
+    }
+    return dataSpace;
+}
+
+// static
+void ColorUtils::getColorConfigFromFormat(
+        const sp<AMessage> &format, int32_t *range, int32_t *standard, int32_t *transfer) {
+    if (!format->findInt32("color-range", range)) {
+        *range = kColorRangeUnspecified;
+    }
+    if (!format->findInt32("color-standard", standard)) {
+        *standard = kColorStandardUnspecified;
+    }
+    if (!format->findInt32("color-transfer", transfer)) {
+        *transfer = kColorTransferUnspecified;
+    }
+}
+
+// static
+void ColorUtils::copyColorConfig(const sp<AMessage> &source, sp<AMessage> &target) {
+    // 0 values are unspecified
+    int32_t value;
+    if (source->findInt32("color-range", &value)) {
+        target->setInt32("color-range", value);
+    }
+    if (source->findInt32("color-standard", &value)) {
+        target->setInt32("color-standard", value);
+    }
+    if (source->findInt32("color-transfer", &value)) {
+        target->setInt32("color-transfer", value);
+    }
+}
+
+// static
+void ColorUtils::getColorAspectsFromFormat(const sp<AMessage> &format, ColorAspects &aspects) {
+    int32_t range, standard, transfer;
+    getColorConfigFromFormat(format, &range, &standard, &transfer);
+
+    if (convertPlatformColorAspectsToCodecAspects(
+            range, standard, transfer, aspects) != OK) {
+        ALOGW("Ignoring illegal color aspects(R:%d(%s), S:%d(%s), T:%d(%s))",
+                range, asString((ColorRange)range),
+                standard, asString((ColorStandard)standard),
+                transfer, asString((ColorTransfer)transfer));
+        // Invalid values were converted to unspecified params, but otherwise were not changed
+        // For encoders, we leave these as is. For decoders, we will use default values.
+    }
+    ALOGV("Got color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+          "from format (out:R:%d(%s), S:%d(%s), T:%d(%s))",
+            aspects.mRange, asString(aspects.mRange),
+            aspects.mPrimaries, asString(aspects.mPrimaries),
+            aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+            aspects.mTransfer, asString(aspects.mTransfer),
+            range, asString((ColorRange)range),
+            standard, asString((ColorStandard)standard),
+            transfer, asString((ColorTransfer)transfer));
+}
+
+// static
+void ColorUtils::setColorAspectsIntoFormat(
+        const ColorAspects &aspects, sp<AMessage> &format, bool force) {
+    int32_t range = 0, standard = 0, transfer = 0;
+    convertCodecColorAspectsToPlatformAspects(aspects, &range, &standard, &transfer);
+    // save set values to base output format
+    // (encoder input format will read back actually supported values by the codec)
+    if (range != 0 || force) {
+        format->setInt32("color-range", range);
+    }
+    if (standard != 0 || force) {
+        format->setInt32("color-standard", standard);
+    }
+    if (transfer != 0 || force) {
+        format->setInt32("color-transfer", transfer);
+    }
+    ALOGV("Setting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+          "into format (out:R:%d(%s), S:%d(%s), T:%d(%s))",
+            aspects.mRange, asString(aspects.mRange),
+            aspects.mPrimaries, asString(aspects.mPrimaries),
+            aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+            aspects.mTransfer, asString(aspects.mTransfer),
+            range, asString((ColorRange)range),
+            standard, asString((ColorStandard)standard),
+            transfer, asString((ColorTransfer)transfer));
+}
+
+// static
+void ColorUtils::setHDRStaticInfoIntoFormat(
+        const HDRStaticInfo &info, sp<AMessage> &format) {
+    sp<ABuffer> infoBuffer = new ABuffer(25);
+
+    // Serialize the fields into infoBuffer using the little-endian layout defined by CTA-861-3
+    uint8_t *data = infoBuffer->data();
+    // Static_Metadata_Descriptor_ID
+    data[0] = info.mID;
+
+    // display primary 0
+    data[1] = LO_UINT16(info.sType1.mR.x);
+    data[2] = HI_UINT16(info.sType1.mR.x);
+    data[3] = LO_UINT16(info.sType1.mR.y);
+    data[4] = HI_UINT16(info.sType1.mR.y);
+
+    // display primary 1
+    data[5] = LO_UINT16(info.sType1.mG.x);
+    data[6] = HI_UINT16(info.sType1.mG.x);
+    data[7] = LO_UINT16(info.sType1.mG.y);
+    data[8] = HI_UINT16(info.sType1.mG.y);
+
+    // display primary 2
+    data[9] = LO_UINT16(info.sType1.mB.x);
+    data[10] = HI_UINT16(info.sType1.mB.x);
+    data[11] = LO_UINT16(info.sType1.mB.y);
+    data[12] = HI_UINT16(info.sType1.mB.y);
+
+    // white point
+    data[13] = LO_UINT16(info.sType1.mW.x);
+    data[14] = HI_UINT16(info.sType1.mW.x);
+    data[15] = LO_UINT16(info.sType1.mW.y);
+    data[16] = HI_UINT16(info.sType1.mW.y);
+
+    // MaxDisplayLuminance
+    data[17] = LO_UINT16(info.sType1.mMaxDisplayLuminance);
+    data[18] = HI_UINT16(info.sType1.mMaxDisplayLuminance);
+
+    // MinDisplayLuminance
+    data[19] = LO_UINT16(info.sType1.mMinDisplayLuminance);
+    data[20] = HI_UINT16(info.sType1.mMinDisplayLuminance);
+
+    // MaxContentLightLevel
+    data[21] = LO_UINT16(info.sType1.mMaxContentLightLevel);
+    data[22] = HI_UINT16(info.sType1.mMaxContentLightLevel);
+
+    // MaxFrameAverageLightLevel
+    data[23] = LO_UINT16(info.sType1.mMaxFrameAverageLightLevel);
+    data[24] = HI_UINT16(info.sType1.mMaxFrameAverageLightLevel);
+
+    format->setBuffer("hdr-static-info", infoBuffer);
+}
+
+// a simple method copied from Utils.cpp
+static uint16_t U16LE_AT(const uint8_t *ptr) {
+    return ptr[0] | (ptr[1] << 8);
+}
+
+// static
+bool ColorUtils::getHDRStaticInfoFromFormat(const sp<AMessage> &format, HDRStaticInfo *info) {
+    sp<ABuffer> buf;
+    if (!format->findBuffer("hdr-static-info", &buf)) {
+        return false;
+    }
+
+    // TODO: Make this more flexible when adding more members to HDRStaticInfo
+    if (buf->size() != 25 /* static Metadata Type 1 size */) {
+        ALOGW("Ignore invalid HDRStaticInfo with size: %zu", buf->size());
+        return false;
+    }
+
+    const uint8_t *data = buf->data();
+    if (*data != HDRStaticInfo::kType1) {
+        ALOGW("Unsupported static Metadata Type %u", *data);
+        return false;
+    }
+
+    info->mID = HDRStaticInfo::kType1;
+    info->sType1.mR.x = U16LE_AT(&data[1]);
+    info->sType1.mR.y = U16LE_AT(&data[3]);
+    info->sType1.mG.x = U16LE_AT(&data[5]);
+    info->sType1.mG.y = U16LE_AT(&data[7]);
+    info->sType1.mB.x = U16LE_AT(&data[9]);
+    info->sType1.mB.y = U16LE_AT(&data[11]);
+    info->sType1.mW.x = U16LE_AT(&data[13]);
+    info->sType1.mW.y = U16LE_AT(&data[15]);
+    info->sType1.mMaxDisplayLuminance = U16LE_AT(&data[17]);
+    info->sType1.mMinDisplayLuminance = U16LE_AT(&data[19]);
+    info->sType1.mMaxContentLightLevel = U16LE_AT(&data[21]);
+    info->sType1.mMaxFrameAverageLightLevel = U16LE_AT(&data[23]);
+
+    ALOGV("Got HDRStaticInfo from config (R: %u %u, G: %u %u, B: %u, %u, W: %u, %u, "
+            "MaxDispL: %u, MinDispL: %u, MaxContentL: %u, MaxFrameAvgL: %u)",
+            info->sType1.mR.x, info->sType1.mR.y, info->sType1.mG.x, info->sType1.mG.y,
+            info->sType1.mB.x, info->sType1.mB.y, info->sType1.mW.x, info->sType1.mW.y,
+            info->sType1.mMaxDisplayLuminance, info->sType1.mMinDisplayLuminance,
+            info->sType1.mMaxContentLightLevel, info->sType1.mMaxFrameAverageLightLevel);
+    return true;
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/MediaBuffer.cpp b/media/libstagefright/foundation/MediaBuffer.cpp
similarity index 86%
rename from media/libstagefright/MediaBuffer.cpp
rename to media/libstagefright/foundation/MediaBuffer.cpp
index 1f80a47..fa8e241 100644
--- a/media/libstagefright/MediaBuffer.cpp
+++ b/media/libstagefright/foundation/MediaBuffer.cpp
@@ -47,13 +47,29 @@
     : mObserver(NULL),
       mNextBuffer(NULL),
       mRefCount(0),
-      mData(malloc(size)),
+      mData(NULL),
       mSize(size),
       mRangeOffset(0),
       mRangeLength(size),
       mOwnsData(true),
       mMetaData(new MetaData),
       mOriginal(NULL) {
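+    // Small buffers come from the heap; buffers of at least kSharedMemThreshold
+    // bytes are carved out of shared memory via MemoryDealer so their contents
+    // can cross process boundaries without a copy. If the shared-memory
+    // allocation fails, fall back to the heap.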
+    if (size < kSharedMemThreshold) {
+        mData = malloc(size);
+    } else {
+        sp<MemoryDealer> memoryDealer = new MemoryDealer(size, "MediaBuffer");
+        mMemory = memoryDealer->allocate(size);
+        if (mMemory == NULL) {
+            ALOGW("Failed to allocate shared memory, trying regular allocation!");
+            mData = malloc(size);
+            if (mData == NULL) {
+                ALOGE("Out of memory");
+            }
+        } else {
+            mData = mMemory->pointer();
+            ALOGV("Allocated shared mem buffer of size %zu @ %p", size, mData);
+        }
+    }
 }
 
 MediaBuffer::MediaBuffer(const sp<GraphicBuffer>& graphicBuffer)
@@ -158,7 +174,7 @@
 MediaBuffer::~MediaBuffer() {
     CHECK(mObserver == NULL);
 
-    if (mOwnsData && mData != NULL) {
+    if (mOwnsData && mData != NULL && mMemory == NULL) {
         free(mData);
         mData = NULL;
     }
diff --git a/media/libstagefright/foundation/MediaBufferGroup.cpp b/media/libstagefright/foundation/MediaBufferGroup.cpp
new file mode 100644
index 0000000..9022324
--- /dev/null
+++ b/media/libstagefright/foundation/MediaBufferGroup.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaBufferGroup"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferGroup.h>
+
+namespace android {
+
+MediaBufferGroup::MediaBufferGroup()
+    : mFirstBuffer(NULL),
+      mLastBuffer(NULL) {
+}
+
+MediaBufferGroup::~MediaBufferGroup() {
+    MediaBuffer *next;
+    for (MediaBuffer *buffer = mFirstBuffer; buffer != NULL;
+         buffer = next) {
+        next = buffer->nextBuffer();
+
+        CHECK_EQ(buffer->refcount(), 0);
+
+        buffer->setObserver(NULL);
+        buffer->release();
+    }
+}
+
+void MediaBufferGroup::add_buffer(MediaBuffer *buffer) {
+    Mutex::Autolock autoLock(mLock);
+
+    buffer->setObserver(this);
+
+    if (mLastBuffer) {
+        mLastBuffer->setNextBuffer(buffer);
+    } else {
+        mFirstBuffer = buffer;
+    }
+
+    mLastBuffer = buffer;
+}
+
+status_t MediaBufferGroup::acquire_buffer(
+        MediaBuffer **out, bool nonBlocking, size_t requestedSize) {
+    Mutex::Autolock autoLock(mLock);
+
+    for (;;) {
+        MediaBuffer *freeBuffer = NULL;
+        MediaBuffer *freeBufferPrevious = NULL;
+        MediaBuffer *buffer = NULL;
+        MediaBuffer *bufferPrevious = NULL;
+        size_t smallest = requestedSize;
+        for (buffer = mFirstBuffer;
+             buffer != NULL; buffer = buffer->nextBuffer()) {
+            if (buffer->refcount() == 0) {
+               if (buffer->size() >= requestedSize) {
+                   break;
+               } else if (buffer->size() < smallest) {
+                   smallest = buffer->size();
+                   freeBuffer = buffer;
+                   freeBufferPrevious = bufferPrevious;
+               }
+            }
+            bufferPrevious = buffer;
+        }
+
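+        // No free buffer was large enough: retire the smallest free buffer and
+        // splice a replacement into the list, over-allocating by 50% to reduce
+        // future reallocations.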
+        if (buffer == NULL && freeBuffer != NULL) {
+            ALOGV("allocate new buffer, requested size %zu vs available %zu",
+                    requestedSize, freeBuffer->size());
+            size_t allocateSize = requestedSize;
+            if (requestedSize < SIZE_MAX / 3) {
+                allocateSize = requestedSize * 3 / 2;
+            }
+            MediaBuffer *newBuffer = new MediaBuffer(allocateSize);
+            newBuffer->setObserver(this);
+            if (freeBuffer == mFirstBuffer) {
+                mFirstBuffer = newBuffer;
+            }
+            if (freeBuffer == mLastBuffer) {
+                mLastBuffer = newBuffer;
+            }
+            newBuffer->setNextBuffer(freeBuffer->nextBuffer());
+            if (freeBufferPrevious != NULL) {
+                freeBufferPrevious->setNextBuffer(newBuffer);
+            }
+            freeBuffer->setObserver(NULL);
+            freeBuffer->release();
+
+            buffer = newBuffer;
+        }
+
+        if (buffer != NULL) {
+            buffer->add_ref();
+            buffer->reset();
+
+            *out = buffer;
+            return OK;
+        }
+
+        if (nonBlocking) {
+            *out = NULL;
+            return WOULD_BLOCK;
+        }
+
+        // All buffers are in use. Block until one of them is returned to us.
+        mCondition.wait(mLock);
+    }
+}
+
+void MediaBufferGroup::signalBufferReturned(MediaBuffer *) {
+    Mutex::Autolock autoLock(mLock);
+    mCondition.signal();
+}
+
+}  // namespace android
diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/foundation/MetaData.cpp
similarity index 79%
rename from media/libstagefright/MetaData.cpp
rename to media/libstagefright/foundation/MetaData.cpp
index 1a11c1e..b4abc60 100644
--- a/media/libstagefright/MetaData.cpp
+++ b/media/libstagefright/foundation/MetaData.cpp
@@ -107,7 +107,7 @@
 }
 
 bool MetaData::findInt32(uint32_t key, int32_t *value) {
-    uint32_t type;
+    uint32_t type = 0;
     const void *data;
     size_t size;
     if (!findData(key, &type, &data, &size) || type != TYPE_INT32) {
@@ -122,7 +122,7 @@
 }
 
 bool MetaData::findInt64(uint32_t key, int64_t *value) {
-    uint32_t type;
+    uint32_t type = 0;
     const void *data;
     size_t size;
     if (!findData(key, &type, &data, &size) || type != TYPE_INT64) {
@@ -137,7 +137,7 @@
 }
 
 bool MetaData::findFloat(uint32_t key, float *value) {
-    uint32_t type;
+    uint32_t type = 0;
     const void *data;
     size_t size;
     if (!findData(key, &type, &data, &size) || type != TYPE_FLOAT) {
@@ -152,7 +152,7 @@
 }
 
 bool MetaData::findPointer(uint32_t key, void **value) {
-    uint32_t type;
+    uint32_t type = 0;
     const void *data;
     size_t size;
     if (!findData(key, &type, &data, &size) || type != TYPE_POINTER) {
@@ -170,7 +170,7 @@
         uint32_t key,
         int32_t *left, int32_t *top,
         int32_t *right, int32_t *bottom) {
-    uint32_t type;
+    uint32_t type = 0;
     const void *data;
     size_t size;
     if (!findData(key, &type, &data, &size) || type != TYPE_RECT) {
@@ -316,7 +316,7 @@
     mSize = 0;
 }
 
-String8 MetaData::typed_data::asString() const {
+String8 MetaData::typed_data::asString(bool verbose) const {
     String8 out;
     const void *data = storage();
     switch(mType) {
@@ -348,7 +348,7 @@
 
         default:
             out = String8::format("(unknown type %d, size %zu)", mType, mSize);
-            if (mSize <= 48) { // if it's less than three lines of hex data, dump it
+            if (verbose && mSize <= 48) { // if it's less than three lines of hex data, dump it
                 AString foo;
                 hexdump(data, mSize, 0, &foo);
                 out.append("\n");
@@ -367,15 +367,81 @@
     s[4] = '\0';
 }
 
+String8 MetaData::toString() const {
+    String8 s;
+    for (int i = mItems.size(); --i >= 0;) {
+        int32_t key = mItems.keyAt(i);
+        char cc[5];
+        MakeFourCCString(key, cc);
+        const typed_data &item = mItems.valueAt(i);
+        s.appendFormat("%s: %s", cc, item.asString(false).string());
+        if (i != 0) {
+            s.append(", ");
+        }
+    }
+    return s;
+}
+
 void MetaData::dumpToLog() const {
     for (int i = mItems.size(); --i >= 0;) {
         int32_t key = mItems.keyAt(i);
         char cc[5];
         MakeFourCCString(key, cc);
         const typed_data &item = mItems.valueAt(i);
-        ALOGI("%s: %s", cc, item.asString().string());
+        ALOGI("%s: %s", cc, item.asString(true /* verbose */).string());
     }
 }
 
+status_t MetaData::writeToParcel(Parcel &parcel) {
+    size_t numItems = mItems.size();
+    parcel.writeUint32(uint32_t(numItems));
+    for (size_t i = 0; i < numItems; i++) {
+        int32_t key = mItems.keyAt(i);
+        const typed_data &item = mItems.valueAt(i);
+        uint32_t type;
+        const void *data;
+        size_t size;
+        item.getData(&type, &data, &size);
+        parcel.writeInt32(key);
+        parcel.writeUint32(type);
+        parcel.writeByteArray(size, (uint8_t*)data);
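+        // writeByteArray() stores a length word followed by the padded payload,
+        // which lines up with the readUint32() + readInplace() sequence in
+        // updateFromParcel() below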
+    }
+    return OK;
+}
+
+status_t MetaData::updateFromParcel(const Parcel &parcel) {
+    uint32_t numItems;
+    if (parcel.readUint32(&numItems) == OK) {
+        for (size_t i = 0; i < numItems; i++) {
+            int32_t key;
+            uint32_t type;
+            uint32_t size;
+            status_t ret = parcel.readInt32(&key);
+            ret |= parcel.readUint32(&type);
+            ret |= parcel.readUint32(&size);
+            if (ret != OK) {
+                break;
+            }
+            // copy data directly from Parcel storage, then advance position
+            const void *data = parcel.readInplace(size);
+            if (data == NULL) {
+                ALOGW("failed to read %u bytes from parcel", size);
+                break;
+            }
+            setData(key, type, data, size);
+        }
+
+        return OK;
+    }
+    ALOGW("no metadata in parcel");
+    return UNKNOWN_ERROR;
+}
+
+/* static */
+sp<MetaData> MetaData::createFromParcel(const Parcel &parcel) {
+    sp<MetaData> meta = new MetaData();
+    meta->updateFromParcel(parcel);
+    return meta;
+}
+
 }  // namespace android
 
diff --git a/media/libstagefright/http/Android.mk b/media/libstagefright/http/Android.mk
index 5fb51c1..bc71134 100644
--- a/media/libstagefright/http/Android.mk
+++ b/media/libstagefright/http/Android.mk
@@ -23,6 +23,7 @@
 
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
index 801ff26..76ec625 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -65,10 +65,16 @@
 
     mCachedSizeValid = false;
 
+    if (success) {
+        AString sanitized = uriDebugString(uri);
+        mName = String8::format("MediaHTTP(%s)", sanitized.c_str());
+    }
+
     return success ? OK : UNKNOWN_ERROR;
 }
 
 void MediaHTTP::disconnect() {
+    mName = String8("MediaHTTP(<disconnected>)");
     if (mInitCheck != OK) {
         return;
     }
diff --git a/media/libstagefright/httplive/Android.mk b/media/libstagefright/httplive/Android.mk
index fc85835..f904212 100644
--- a/media/libstagefright/httplive/Android.mk
+++ b/media/libstagefright/httplive/Android.mk
@@ -15,6 +15,7 @@
 
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
         libbinder \
diff --git a/media/libstagefright/httplive/HTTPDownloader.cpp b/media/libstagefright/httplive/HTTPDownloader.cpp
index 3b44bae..861b85a 100644
--- a/media/libstagefright/httplive/HTTPDownloader.cpp
+++ b/media/libstagefright/httplive/HTTPDownloader.cpp
@@ -31,6 +31,7 @@
 #include <openssl/aes.h>
 #include <openssl/md5.h>
 #include <utils/Mutex.h>
+#include <inttypes.h>
 
 namespace android {
 
@@ -165,7 +166,10 @@
         size_t maxBytesToRead = bufferRemaining;
         if (range_length >= 0) {
             int64_t bytesLeftInRange = range_length - buffer->size();
-            if (bytesLeftInRange < (int64_t)maxBytesToRead) {
+            if (bytesLeftInRange < 0) {
+                ALOGE("range_length %" PRId64 " wrapped around", range_length);
+                return ERROR_OUT_OF_RANGE;
+            } else if (bytesLeftInRange < (int64_t)maxBytesToRead) {
                 maxBytesToRead = bytesLeftInRange;
 
                 if (bytesLeftInRange == 0) {
@@ -237,7 +241,7 @@
     // MD5 functionality is not available on the simulator, treat all
     // playlists as changed.
 
-#if defined(HAVE_ANDROID_OS)
+#if defined(__ANDROID__)
     uint8_t hash[16];
 
     MD5_CTX m;
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 1557401..cebf95c 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -477,7 +477,7 @@
     sp<MetaData> meta = packetSource->getFormat();
 
     if (meta == NULL) {
-        return -EAGAIN;
+        return -EWOULDBLOCK;
     }
 
     if (stream == STREAMTYPE_AUDIO) {
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index ff2bb27..c04549a 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -603,6 +603,18 @@
                     return ERROR_MALFORMED;
                 }
                 err = parseMetaDataDuration(line, &itemMeta, "durationUs");
+            } else if (line.startsWith("#EXT-X-DISCONTINUITY-SEQUENCE")) {
+                if (mIsVariantPlaylist) {
+                    return ERROR_MALFORMED;
+                }
+                size_t seq;
+                err = parseDiscontinuitySequence(line, &seq);
+                if (err == OK) {
+                    mDiscontinuitySeq = seq;
+                    ALOGI("mDiscontinuitySeq %zu", mDiscontinuitySeq);
+                } else {
+                    ALOGI("Failed to parseDiscontinuitySequence %d", err);
+                }
             } else if (line.startsWith("#EXT-X-DISCONTINUITY")) {
                 if (mIsVariantPlaylist) {
                     return ERROR_MALFORMED;
@@ -638,15 +650,6 @@
                 }
             } else if (line.startsWith("#EXT-X-MEDIA")) {
                 err = parseMedia(line);
-            } else if (line.startsWith("#EXT-X-DISCONTINUITY-SEQUENCE")) {
-                if (mIsVariantPlaylist) {
-                    return ERROR_MALFORMED;
-                }
-                size_t seq;
-                err = parseDiscontinuitySequence(line, &seq);
-                if (err == OK) {
-                    mDiscontinuitySeq = seq;
-                }
             }
 
             if (err != OK) {
@@ -698,6 +701,22 @@
         mLastSeqNumber = mFirstSeqNumber + mItems.size() - 1;
     }
 
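+    // Validate media-group references only after the whole playlist has been
+    // parsed: an #EXT-X-MEDIA tag may define a group after the
+    // #EXT-X-STREAM-INF line that references it.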
+    for (size_t i = 0; i < mItems.size(); ++i) {
+        sp<AMessage> meta = mItems.itemAt(i).mMeta;
+        const char *keys[] = {"audio", "video", "subtitles"};
+        for (size_t j = 0; j < sizeof(keys) / sizeof(const char *); ++j) {
+            AString groupID;
+            if (meta->findString(keys[j], &groupID)) {
+                ssize_t groupIndex = mMediaGroups.indexOfKey(groupID);
+                if (groupIndex < 0) {
+                    ALOGE("Undefined media group '%s' referenced in stream info.",
+                          groupID.c_str());
+                    return ERROR_MALFORMED;
+                }
+            }
+        }
+    }
+
     return OK;
 }
 
@@ -870,15 +889,6 @@
             }
 
             const AString &groupID = unquoteString(val);
-            ssize_t groupIndex = mMediaGroups.indexOfKey(groupID);
-
-            if (groupIndex < 0) {
-                ALOGE("Undefined media group '%s' referenced in stream info.",
-                      groupID.c_str());
-
-                return ERROR_MALFORMED;
-            }
-
             key.tolower();
             if (meta->get() == NULL) {
                 *meta = new AMessage;
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 72d832e..7ad7fee 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -355,7 +355,11 @@
     if (!n) {
         return OK;
     }
-    CHECK(n % 16 == 0);
+
+    if (n < 16 || n % 16) {
+        ALOGE("not enough or trailing bytes (%zu) in encrypted buffer", n);
+        return ERROR_MALFORMED;
+    }
 
     if (first) {
         // If decrypting the first block in a file, read the iv from the manifest
@@ -364,11 +368,15 @@
         AString iv;
         if (itemMeta->findString("cipher-iv", &iv)) {
             if ((!iv.startsWith("0x") && !iv.startsWith("0X"))
-                    || iv.size() != 16 * 2 + 2) {
+                    || iv.size() > 16 * 2 + 2) {
                 ALOGE("malformed cipher IV '%s'.", iv.c_str());
                 return ERROR_MALFORMED;
             }
 
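+            // tolerate IVs with fewer than 32 hex digits by left-padding the
+            // string with zeros just after the "0x" prefix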
+            while (iv.size() < 16 * 2 + 2) {
+                iv.insert("0", 1, 2);
+            }
+
             memset(mAESInitVec, 0, sizeof(mAESInitVec));
             for (size_t i = 0; i < 16; ++i) {
                 char c1 = tolower(iv.c_str()[2 + 2 * i]);
@@ -1084,6 +1092,13 @@
             // fall through
         } else {
             if (mPlaylist != NULL) {
+                if (mSeqNumber >= firstSeqNumberInPlaylist + (int32_t)mPlaylist->size()
+                        && !mPlaylist->isComplete()) {
+                    // Live playlists
+                    ALOGW("sequence number %d not yet available", mSeqNumber);
+                    postMonitorQueue(delayUsToRefreshPlaylist());
+                    return false;
+                }
                 ALOGE("Cannot find sequence number %d in playlist "
                      "(contains %d - %d)",
                      mSeqNumber, firstSeqNumberInPlaylist,
@@ -1176,8 +1191,7 @@
-        // Signal a format discontinuity to ATSParser to clear partial data
-        // from previous streams. Not doing this causes bitstream corruption.
+        // Discard the ATSParser entirely so partial data from previous streams
+        // is dropped. Not doing this causes bitstream corruption.
         if (mTSParser != NULL) {
-            mTSParser->signalDiscontinuity(
-                    ATSParser::DISCONTINUITY_FORMATCHANGE, NULL /* extra */);
+            mTSParser.clear();
         }
 
         queueDiscontinuity(
@@ -1628,7 +1642,8 @@
 
     if (mSegmentFirstPTS < 0ll) {
         // get the smallest first PTS from all streams present in this parser
-        for (size_t i = mPacketSources.size(); i-- > 0;) {
+        for (size_t i = mPacketSources.size(); i > 0;) {
+            i--;
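+            // decrement in the body: the usual "i-- > 0" idiom wraps the
+            // unsigned index to SIZE_MAX on loop exit, which the newly enabled
+            // unsigned-integer-overflow sanitizer would flag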
             const LiveSession::StreamType stream = mPacketSources.keyAt(i);
             if (stream == LiveSession::STREAMTYPE_SUBTITLES) {
                 ALOGE("MPEG2 Transport streams do not contain subtitles.");
@@ -1683,7 +1698,8 @@
     }
 
     status_t err = OK;
-    for (size_t i = mPacketSources.size(); i-- > 0;) {
+    for (size_t i = mPacketSources.size(); i > 0;) {
+        i--;
         sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
 
         const LiveSession::StreamType stream = mPacketSources.keyAt(i);
@@ -1807,7 +1823,8 @@
     }
 
     if (err != OK) {
-        for (size_t i = mPacketSources.size(); i-- > 0;) {
+        for (size_t i = mPacketSources.size(); i > 0;) {
+            i--;
             sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
             packetSource->clear();
         }
@@ -1902,6 +1919,9 @@
             while (!it.done()) {
                 size_t length;
                 const uint8_t *data = it.getData(&length);
+                if (!data) {
+                    return ERROR_MALFORMED;
+                }
 
                 static const char *kMatchName =
                     "com.apple.streaming.transportStreamTimestamp";
diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk
index 68bd017..2cfba44 100644
--- a/media/libstagefright/id3/Android.mk
+++ b/media/libstagefright/id3/Android.mk
@@ -6,6 +6,7 @@
 
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_MODULE := libstagefright_id3
 
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 4410579..35691b9 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -618,6 +618,11 @@
         return NULL;
     }
 
+    // Prevent integer underflow
+    if (mFrameSize < getHeaderLength()) {
+        return NULL;
+    }
+
     *length = mFrameSize - getHeaderLength();
 
     return mFrameData;
@@ -832,6 +837,9 @@
     while (!it.done()) {
         size_t size;
         const uint8_t *data = it.getData(&size);
+        if (!data) {
+            return NULL;
+        }
 
         if (mVersion == ID3_V2_3 || mVersion == ID3_V2_4) {
             uint8_t encoding = data[0];
diff --git a/media/libstagefright/include/AACEncoder.h b/media/libstagefright/include/AACEncoder.h
index 52beb0e..462e905 100644
--- a/media/libstagefright/include/AACEncoder.h
+++ b/media/libstagefright/include/AACEncoder.h
@@ -27,9 +27,9 @@
 
 class MediaBufferGroup;
 
-class AACEncoder: public MediaSource {
+class AACEncoder: public BnMediaSource {
     public:
-        AACEncoder(const sp<MediaSource> &source, const sp<MetaData> &meta);
+        AACEncoder(const sp<IMediaSource> &source, const sp<MetaData> &meta);
 
         virtual status_t start(MetaData *params);
         virtual status_t stop();
@@ -42,7 +42,7 @@
         virtual ~AACEncoder();
 
     private:
-        sp<MediaSource>   mSource;
+        sp<IMediaSource>   mSource;
         sp<MetaData>      mMeta;
         bool              mStarted;
         MediaBufferGroup *mBufferGroup;
diff --git a/media/libstagefright/include/AACExtractor.h b/media/libstagefright/include/AACExtractor.h
index e98ca82..bd4c41c 100644
--- a/media/libstagefright/include/AACExtractor.h
+++ b/media/libstagefright/include/AACExtractor.h
@@ -32,10 +32,11 @@
     AACExtractor(const sp<DataSource> &source, const sp<AMessage> &meta);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
+    virtual const char * name() { return "AACExtractor"; }
 
 protected:
     virtual ~AACExtractor();
diff --git a/media/libstagefright/include/AMRExtractor.h b/media/libstagefright/include/AMRExtractor.h
index 4a1c827..ba2b674 100644
--- a/media/libstagefright/include/AMRExtractor.h
+++ b/media/libstagefright/include/AMRExtractor.h
@@ -32,10 +32,11 @@
     AMRExtractor(const sp<DataSource> &source);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
+    virtual const char * name() { return "AMRExtractor"; }
 
 protected:
     virtual ~AMRExtractor();
diff --git a/media/libstagefright/include/AVIExtractor.h b/media/libstagefright/include/AVIExtractor.h
index ff5dcb5..3be505c 100644
--- a/media/libstagefright/include/AVIExtractor.h
+++ b/media/libstagefright/include/AVIExtractor.h
@@ -36,6 +36,7 @@
             size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
+    virtual const char * name() { return "AVIExtractor"; }
 
 protected:
     virtual ~AVIExtractor();
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
deleted file mode 100644
index 758b2c9..0000000
--- a/media/libstagefright/include/AwesomePlayer.h
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AWESOME_PLAYER_H_
-
-#define AWESOME_PLAYER_H_
-
-#include "HTTPBase.h"
-#include "TimedEventQueue.h"
-
-#include <media/AudioResamplerPublic.h>
-#include <media/MediaPlayerInterface.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/TimeSource.h>
-#include <media/stagefright/MetaData.h>
-#include <utils/threads.h>
-#include <drm/DrmManagerClient.h>
-
-namespace android {
-
-class AudioPlayer;
-struct ClockEstimator;
-class IDataSource;
-class MediaBuffer;
-struct MediaExtractor;
-struct MediaSource;
-struct NuCachedSource2;
-class IGraphicBufferProducer;
-
-class DrmManagerClinet;
-class DecryptHandle;
-
-class TimedTextDriver;
-class WVMExtractor;
-
-struct AwesomeRenderer : public RefBase {
-    AwesomeRenderer() {}
-
-    virtual void render(MediaBuffer *buffer) = 0;
-
-private:
-    AwesomeRenderer(const AwesomeRenderer &);
-    AwesomeRenderer &operator=(const AwesomeRenderer &);
-};
-
-struct AwesomePlayer {
-    AwesomePlayer();
-    ~AwesomePlayer();
-
-    void setListener(const wp<MediaPlayerBase> &listener);
-    void setUID(uid_t uid);
-
-    status_t setDataSource(
-            const sp<IMediaHTTPService> &httpService,
-            const char *uri,
-            const KeyedVector<String8, String8> *headers = NULL);
-
-    status_t setDataSource(int fd, int64_t offset, int64_t length);
-
-    status_t setDataSource(const sp<IStreamSource> &source);
-
-    void reset();
-
-    status_t prepare();
-    status_t prepare_l();
-    status_t prepareAsync();
-    status_t prepareAsync_l();
-
-    status_t play();
-    status_t pause();
-
-    bool isPlaying() const;
-
-    status_t setSurfaceTexture(const sp<IGraphicBufferProducer> &bufferProducer);
-    void setAudioSink(const sp<MediaPlayerBase::AudioSink> &audioSink);
-    status_t setLooping(bool shouldLoop);
-
-    status_t getDuration(int64_t *durationUs);
-    status_t getPosition(int64_t *positionUs);
-
-    status_t setParameter(int key, const Parcel &request);
-    status_t getParameter(int key, Parcel *reply);
-    status_t setPlaybackSettings(const AudioPlaybackRate &rate);
-    status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
-    status_t invoke(const Parcel &request, Parcel *reply);
-    status_t setCacheStatCollectFreq(const Parcel &request);
-
-    status_t seekTo(int64_t timeUs);
-
-    // This is a mask of MediaExtractor::Flags.
-    uint32_t flags() const;
-
-    void postAudioEOS(int64_t delayUs = 0ll);
-    void postAudioSeekComplete();
-    void postAudioTearDown();
-    status_t dump(int fd, const Vector<String16> &args) const;
-
-private:
-    friend struct AwesomeEvent;
-    friend struct PreviewPlayer;
-
-    enum {
-        PLAYING             = 0x01,
-        LOOPING             = 0x02,
-        FIRST_FRAME         = 0x04,
-        PREPARING           = 0x08,
-        PREPARED            = 0x10,
-        AT_EOS              = 0x20,
-        PREPARE_CANCELLED   = 0x40,
-        CACHE_UNDERRUN      = 0x80,
-        AUDIO_AT_EOS        = 0x0100,
-        VIDEO_AT_EOS        = 0x0200,
-        AUTO_LOOPING        = 0x0400,
-
-        // We are basically done preparing but are currently buffering
-        // sufficient data to begin playback and finish the preparation phase
-        // for good.
-        PREPARING_CONNECTED = 0x0800,
-
-        // We're triggering a single video event to display the first frame
-        // after the seekpoint.
-        SEEK_PREVIEW        = 0x1000,
-
-        AUDIO_RUNNING       = 0x2000,
-        AUDIOPLAYER_STARTED = 0x4000,
-
-        INCOGNITO           = 0x8000,
-
-        TEXT_RUNNING        = 0x10000,
-        TEXTPLAYER_INITIALIZED  = 0x20000,
-
-        SLOW_DECODER_HACK   = 0x40000,
-    };
-
-    mutable Mutex mLock;
-    Mutex mMiscStateLock;
-    mutable Mutex mStatsLock;
-    Mutex mAudioLock;
-
-    OMXClient mClient;
-    TimedEventQueue mQueue;
-    bool mQueueStarted;
-    wp<MediaPlayerBase> mListener;
-    bool mUIDValid;
-    uid_t mUID;
-
-    sp<ANativeWindow> mNativeWindow;
-    sp<MediaPlayerBase::AudioSink> mAudioSink;
-
-    SystemTimeSource mSystemTimeSource;
-    TimeSource *mTimeSource;
-
-    sp<IMediaHTTPService> mHTTPService;
-    String8 mUri;
-    KeyedVector<String8, String8> mUriHeaders;
-
-    sp<DataSource> mFileSource;
-
-    sp<MediaSource> mVideoTrack;
-    sp<MediaSource> mVideoSource;
-    sp<AwesomeRenderer> mVideoRenderer;
-    bool mVideoRenderingStarted;
-    bool mVideoRendererIsPreview;
-    int32_t mMediaRenderingStartGeneration;
-    int32_t mStartGeneration;
-
-    ssize_t mActiveAudioTrackIndex;
-    sp<MediaSource> mAudioTrack;
-    sp<MediaSource> mOmxSource;
-    sp<MediaSource> mAudioSource;
-    AudioPlayer *mAudioPlayer;
-    AudioPlaybackRate mPlaybackSettings;
-    int64_t mDurationUs;
-
-    int32_t mDisplayWidth;
-    int32_t mDisplayHeight;
-    int32_t mVideoScalingMode;
-
-    uint32_t mFlags;
-    uint32_t mExtractorFlags;
-    uint32_t mSinceLastDropped;
-
-    int64_t mTimeSourceDeltaUs;
-    int64_t mVideoTimeUs;
-
-    enum SeekType {
-        NO_SEEK,
-        SEEK,
-        SEEK_VIDEO_ONLY
-    };
-    SeekType mSeeking;
-
-    bool mSeekNotificationSent;
-    int64_t mSeekTimeUs;
-
-    int64_t mBitrate;  // total bitrate of the file (in bps) or -1 if unknown.
-
-    bool mWatchForAudioSeekComplete;
-    bool mWatchForAudioEOS;
-
-    sp<TimedEventQueue::Event> mVideoEvent;
-    bool mVideoEventPending;
-    sp<TimedEventQueue::Event> mStreamDoneEvent;
-    bool mStreamDoneEventPending;
-    sp<TimedEventQueue::Event> mBufferingEvent;
-    bool mBufferingEventPending;
-    sp<TimedEventQueue::Event> mCheckAudioStatusEvent;
-    bool mAudioStatusEventPending;
-    sp<TimedEventQueue::Event> mVideoLagEvent;
-    bool mVideoLagEventPending;
-    sp<TimedEventQueue::Event> mAudioTearDownEvent;
-    bool mAudioTearDownEventPending;
-    sp<TimedEventQueue::Event> mAsyncPrepareEvent;
-    Condition mPreparedCondition;
-    bool mIsAsyncPrepare;
-    status_t mPrepareResult;
-    status_t mStreamDoneStatus;
-
-    void postVideoEvent_l(int64_t delayUs = -1);
-    void postBufferingEvent_l();
-    void postStreamDoneEvent_l(status_t status);
-    void postCheckAudioStatusEvent(int64_t delayUs);
-    void postVideoLagEvent_l();
-    void postAudioTearDownEvent(int64_t delayUs);
-
-    status_t play_l();
-
-    MediaBuffer *mVideoBuffer;
-
-    sp<ClockEstimator> mClockEstimator;
-    sp<HTTPBase> mConnectingDataSource;
-    sp<NuCachedSource2> mCachedSource;
-
-    DrmManagerClient *mDrmManagerClient;
-    sp<DecryptHandle> mDecryptHandle;
-
-    int64_t mLastVideoTimeUs;
-    TimedTextDriver *mTextDriver;
-
-    sp<WVMExtractor> mWVMExtractor;
-    sp<MediaExtractor> mExtractor;
-
-    status_t setDataSource_l(
-            const sp<IMediaHTTPService> &httpService,
-            const char *uri,
-            const KeyedVector<String8, String8> *headers = NULL);
-
-    status_t setDataSource_l(const sp<DataSource> &dataSource);
-    status_t setDataSource_l(const sp<MediaExtractor> &extractor);
-    void reset_l();
-    status_t seekTo_l(int64_t timeUs);
-    status_t pause_l(bool at_eos = false);
-    void initRenderer_l();
-    void notifyVideoSize_l();
-    void seekAudioIfNecessary_l();
-
-    void cancelPlayerEvents(bool keepNotifications = false);
-
-    void setAudioSource(sp<MediaSource> source);
-    status_t initAudioDecoder();
-
-
-    void setVideoSource(sp<MediaSource> source);
-    status_t initVideoDecoder(uint32_t flags = 0);
-
-    void addTextSource_l(size_t trackIndex, const sp<MediaSource>& source);
-
-    void onStreamDone();
-
-    void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0);
-
-    void onVideoEvent();
-    void onBufferingUpdate();
-    void onCheckAudioStatus();
-    void onPrepareAsyncEvent();
-    void abortPrepare(status_t err);
-    void finishAsyncPrepare_l();
-    void onVideoLagUpdate();
-    void onAudioTearDownEvent();
-
-    void beginPrepareAsync_l();
-
-    bool getCachedDuration_l(int64_t *durationUs, bool *eos);
-
-    status_t finishSetDataSource_l();
-
-    static bool ContinuePreparation(void *cookie);
-
-    bool getBitrate(int64_t *bitrate);
-
-    int64_t estimateRealTimeUs(TimeSource *ts, int64_t systemTimeUs);
-    void finishSeekIfNecessary(int64_t videoTimeUs);
-    void ensureCacheIsFetching_l();
-
-    void notifyIfMediaStarted_l();
-    void createAudioPlayer_l();
-    status_t startAudioPlayer_l(bool sendErrorNotification = true);
-
-    void shutdownVideoDecoder_l();
-    status_t setNativeWindow_l(const sp<ANativeWindow> &native);
-
-    bool isStreamingHTTP() const;
-    void sendCacheStats();
-    void checkDrmStatus(const sp<DataSource>& dataSource);
-
-    enum FlagMode {
-        SET,
-        CLEAR,
-        ASSIGN
-    };
-    void modifyFlags(unsigned value, FlagMode mode);
-
-    struct TrackStat {
-        String8 mMIME;
-        String8 mDecoderName;
-    };
-
-    // protected by mStatsLock
-    struct Stats {
-        int mFd;
-        String8 mURI;
-        int64_t mBitrate;
-
-        // FIXME:
-        // These two indices are just 0 or 1 for now
-        // They are not representing the actual track
-        // indices in the stream.
-        ssize_t mAudioTrackIndex;
-        ssize_t mVideoTrackIndex;
-
-        int64_t mNumVideoFramesDecoded;
-        int64_t mNumVideoFramesDropped;
-        int32_t mVideoWidth;
-        int32_t mVideoHeight;
-        uint32_t mFlags;
-        Vector<TrackStat> mTracks;
-    } mStats;
-
-    bool    mOffloadAudio;
-    bool    mAudioTearDown;
-    bool    mAudioTearDownWasPlaying;
-    int64_t mAudioTearDownPosition;
-
-    status_t setVideoScalingMode(int32_t mode);
-    status_t setVideoScalingMode_l(int32_t mode);
-    status_t getTrackInfo(Parcel* reply) const;
-
-    status_t selectAudioTrack_l(const sp<MediaSource>& source, size_t trackIndex);
-
-    // when select is true, the given track is selected.
-    // otherwise, the given track is unselected.
-    status_t selectTrack(size_t trackIndex, bool select);
-
-    size_t countTracks() const;
-
-    AwesomePlayer(const AwesomePlayer &);
-    AwesomePlayer &operator=(const AwesomePlayer &);
-};
-
-}  // namespace android
-
-#endif  // AWESOME_PLAYER_H_
diff --git a/media/libstagefright/include/CallbackDataSource.h b/media/libstagefright/include/CallbackDataSource.h
index 1a21dd3..d2187d5 100644
--- a/media/libstagefright/include/CallbackDataSource.h
+++ b/media/libstagefright/include/CallbackDataSource.h
@@ -36,10 +36,18 @@
     virtual status_t initCheck() const;
     virtual ssize_t readAt(off64_t offset, void *data, size_t size);
     virtual status_t getSize(off64_t *size);
+    virtual uint32_t flags();
+    virtual void close();
+    virtual String8 toString() {
+        return mName;
+    }
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL);
 
 private:
     sp<IDataSource> mIDataSource;
     sp<IMemory> mMemory;
+    bool mIsClosed;
+    String8 mName;
 
     DISALLOW_EVIL_CONSTRUCTORS(CallbackDataSource);
 };
@@ -57,6 +65,11 @@
     virtual ssize_t readAt(off64_t offset, void* data, size_t size);
     virtual status_t getSize(off64_t* size);
     virtual uint32_t flags();
+    virtual void close() { mSource->close(); }
+    virtual String8 toString() {
+        return mName;
+    }
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL);
 
 private:
     // 2kb comes from experimenting with the time-to-first-frame from a MediaPlayer
@@ -70,6 +83,7 @@
     uint8_t mCache[kCacheSize];
     off64_t mCachedOffset;
     size_t mCachedSize;
+    String8 mName;
 
     DISALLOW_EVIL_CONSTRUCTORS(TinyCacheSource);
 };
diff --git a/media/libstagefright/include/DRMExtractor.h b/media/libstagefright/include/DRMExtractor.h
index b4e4afb..3dc7df8 100644
--- a/media/libstagefright/include/DRMExtractor.h
+++ b/media/libstagefright/include/DRMExtractor.h
@@ -18,6 +18,7 @@
 
 #define DRM_EXTRACTOR_H_
 
+#include <media/IMediaSource.h>
 #include <media/stagefright/MediaExtractor.h>
 #include <drm/DrmManagerClient.h>
 
@@ -34,9 +35,10 @@
     DRMExtractor(const sp<DataSource> &source, const char *mime);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
     virtual sp<MetaData> getMetaData();
+    virtual const char * name() { return "DRMExtractor"; }
 
 protected:
     virtual ~DRMExtractor();
@@ -44,7 +46,7 @@
 private:
     sp<DataSource> mDataSource;
 
-    sp<MediaExtractor> mOriginalExtractor;
+    sp<IMediaExtractor> mOriginalExtractor;
     sp<DecryptHandle> mDecryptHandle;
     DrmManagerClient* mDrmManagerClient;
 
diff --git a/media/libstagefright/include/DataConverter.h b/media/libstagefright/include/DataConverter.h
new file mode 100644
index 0000000..8d67921
--- /dev/null
+++ b/media/libstagefright/include/DataConverter.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_DATACONVERTER_H_
+#define STAGEFRIGHT_DATACONVERTER_H_
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+struct ABuffer;
+
+// DataConverter base class, defaults to memcpy
+struct DataConverter : public RefBase {
+    virtual size_t sourceSize(size_t targetSize); // will clamp to SIZE_MAX
+    virtual size_t targetSize(size_t sourceSize); // will clamp to SIZE_MAX
+
+    status_t convert(const sp<ABuffer> &source, sp<ABuffer> &target);
+    virtual ~DataConverter();
+
+protected:
+    virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target);
+};
+
+// SampleConverterBase uses a ratio to calculate the source and target sizes
+// based on source and target sample sizes.
+struct SampleConverterBase : public DataConverter {
+    virtual size_t sourceSize(size_t targetSize);
+    virtual size_t targetSize(size_t sourceSize);
+
+protected:
+    virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) = 0;
+
+    // sourceSize = sourceSampleSize / targetSampleSize * targetSize
+    SampleConverterBase(uint32_t sourceSampleSize, uint32_t targetSampleSize)
+        : mSourceSampleSize(sourceSampleSize),
+          mTargetSampleSize(targetSampleSize) { }
+    size_t mSourceSampleSize;
+    size_t mTargetSampleSize;
+};
+
+// AudioConverter converts between audio PCM formats
+struct AudioConverter : public SampleConverterBase {
+    // return nullptr if conversion is not needed or not supported
+    static AudioConverter *Create(AudioEncoding source, AudioEncoding target);
+
+protected:
+    virtual status_t safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt);
+
+private:
+    AudioConverter(
+            AudioEncoding source, size_t sourceSample,
+            AudioEncoding target, size_t targetSample)
+        : SampleConverterBase(sourceSample, targetSample),
+          mFrom(source),
+          mTo(target) { }
+    AudioEncoding mFrom;
+    AudioEncoding mTo;
+};
+
+} // namespace android
+
+#endif
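
Reviewer note — a minimal usage sketch for the converter API above (illustrative
caller code, not part of this change; assumes an input sp<ABuffer> named |source|
and the AudioEncoding constants kAudioEncodingPcmFloat / kAudioEncodingPcm16bit
from MediaDefs.h):

    // Convert 32-bit float PCM to 16-bit PCM. With 4-byte source samples and
    // 2-byte target samples, sourceSize(n) == 2 * n per the ratio comment above.
    sp<DataConverter> converter(
            AudioConverter::Create(kAudioEncodingPcmFloat, kAudioEncodingPcm16bit));
    if (converter != NULL) {  // NULL means conversion is not needed or unsupported
        sp<ABuffer> target = new ABuffer(converter->targetSize(source->size()));
        if (converter->convert(source, target) != OK) {
            // conversion failed; fall back to passthrough or report the error
        }
    }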
diff --git a/media/libstagefright/include/FLACExtractor.h b/media/libstagefright/include/FLACExtractor.h
index ded91c2..5d030b1 100644
--- a/media/libstagefright/include/FLACExtractor.h
+++ b/media/libstagefright/include/FLACExtractor.h
@@ -32,10 +32,11 @@
     FLACExtractor(const sp<DataSource> &source);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
+    virtual const char * name() { return "FLACExtractor"; }
 
 protected:
     virtual ~FLACExtractor();
diff --git a/media/libstagefright/include/HTTPBase.h b/media/libstagefright/include/HTTPBase.h
index 0c66e27..d325e30 100644
--- a/media/libstagefright/include/HTTPBase.h
+++ b/media/libstagefright/include/HTTPBase.h
@@ -56,8 +56,13 @@
     static void RegisterSocketUserMark(int sockfd, uid_t uid);
     static void UnRegisterSocketUserMark(int sockfd);
 
+    virtual String8 toString() {
+        return mName;
+    }
+
 protected:
     virtual void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
+    String8 mName;
 
 private:
     struct BandwidthEntry {
diff --git a/media/libstagefright/include/HevcUtils.h b/media/libstagefright/include/HevcUtils.h
new file mode 100644
index 0000000..0f59631
--- /dev/null
+++ b/media/libstagefright/include/HevcUtils.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HEVC_UTILS_H_
+
+#define HEVC_UTILS_H_
+
+#include <stdint.h>
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/StrongPointer.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+enum {
+    kHevcNalUnitTypeVps = 32,
+    kHevcNalUnitTypeSps = 33,
+    kHevcNalUnitTypePps = 34,
+    kHevcNalUnitTypePrefixSei = 39,
+    kHevcNalUnitTypeSuffixSei = 40,
+};
+
+enum {
+    // uint8_t
+    kGeneralProfileSpace,
+    // uint8_t
+    kGeneralTierFlag,
+    // uint8_t
+    kGeneralProfileIdc,
+    // uint32_t
+    kGeneralProfileCompatibilityFlags,
+    // uint64_t
+    kGeneralConstraintIndicatorFlags,
+    // uint8_t
+    kGeneralLevelIdc,
+    // uint8_t
+    kChromaFormatIdc,
+    // uint8_t
+    kBitDepthLumaMinus8,
+    // uint8_t
+    kBitDepthChromaMinus8,
+    // uint8_t
+    kVideoFullRangeFlag,
+    // uint8_t
+    kColourPrimaries,
+    // uint8_t
+    kTransferCharacteristics,
+    // uint8_t
+    kMatrixCoeffs,
+};
+
+class HevcParameterSets {
+public:
+    enum Info : uint32_t {
+        kInfoNone                = 0,
+        kInfoIsHdr               = 1 << 0,
+        kInfoHasColorDescription = 1 << 1,
+    };
+
+    HevcParameterSets();
+
+    status_t addNalUnit(const uint8_t* data, size_t size);
+
+    bool findParam8(uint32_t key, uint8_t *param);
+    bool findParam16(uint32_t key, uint16_t *param);
+    bool findParam32(uint32_t key, uint32_t *param);
+    bool findParam64(uint32_t key, uint64_t *param);
+
+    inline size_t getNumNalUnits() { return mNalUnits.size(); }
+    size_t getNumNalUnitsOfType(uint8_t type);
+    uint8_t getType(size_t index);
+    size_t getSize(size_t index);
+    // Note that this method does not write the start code.
+    bool write(size_t index, uint8_t* dest, size_t size);
+    status_t makeHvcc(uint8_t *hvcc, size_t *hvccSize, size_t nalSizeLength);
+
+    Info getInfo() const { return mInfo; }
+
+private:
+    status_t parseVps(const uint8_t* data, size_t size);
+    status_t parseSps(const uint8_t* data, size_t size);
+    status_t parsePps(const uint8_t* data, size_t size);
+
+    KeyedVector<uint32_t, uint64_t> mParams;
+    Vector<sp<ABuffer>> mNalUnits;
+    Info mInfo;
+
+    DISALLOW_EVIL_CONSTRUCTORS(HevcParameterSets);
+};
+
+}  // namespace android
+
+#endif  // HEVC_UTILS_H_
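
Reviewer note — a sketch of the intended call pattern for HevcParameterSets
(illustrative only; the NAL unit buffers and sizes are hypothetical):

    HevcParameterSets params;
    // Feed the raw parameter-set NAL units (without start codes) from the stream.
    params.addNalUnit(vpsData, vpsSize);  // NAL type 32
    params.addNalUnit(spsData, spsSize);  // NAL type 33
    params.addNalUnit(ppsData, ppsSize);  // NAL type 34

    uint8_t hvcc[1024];
    size_t hvccSize = sizeof(hvcc);
    // Assemble an HEVCDecoderConfigurationRecord using 4-byte NAL length fields.
    if (params.makeHvcc(hvcc, &hvccSize, 4 /* nalSizeLength */) == OK) {
        // hvcc[0..hvccSize) now holds the 'hvcC' codec-specific data
    }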
diff --git a/media/libstagefright/include/MP3Extractor.h b/media/libstagefright/include/MP3Extractor.h
index c83d9e80..2fd04f2 100644
--- a/media/libstagefright/include/MP3Extractor.h
+++ b/media/libstagefright/include/MP3Extractor.h
@@ -34,10 +34,11 @@
     MP3Extractor(const sp<DataSource> &source, const sp<AMessage> &meta);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
+    virtual const char * name() { return "MP3Extractor"; }
 
 private:
     status_t mInitCheck;
diff --git a/media/libstagefright/include/MPEG2PSExtractor.h b/media/libstagefright/include/MPEG2PSExtractor.h
index 22cb02d..c8abfb6 100644
--- a/media/libstagefright/include/MPEG2PSExtractor.h
+++ b/media/libstagefright/include/MPEG2PSExtractor.h
@@ -34,12 +34,13 @@
     MPEG2PSExtractor(const sp<DataSource> &source);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
 
     virtual uint32_t flags() const;
+    virtual const char * name() { return "MPEG2PSExtractor"; }
 
 protected:
     virtual ~MPEG2PSExtractor();
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
index 8eb8f6c..34b9606 100644
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ b/media/libstagefright/include/MPEG2TSExtractor.h
@@ -25,6 +25,8 @@
 #include <utils/KeyedVector.h>
 #include <utils/Vector.h>
 
+#include "mpeg2ts/ATSParser.h"
+
 namespace android {
 
 struct AMessage;
@@ -38,12 +40,13 @@
     MPEG2TSExtractor(const sp<DataSource> &source);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
 
     virtual uint32_t flags() const;
+    virtual const char * name() { return "MPEG2TSExtractor"; }
 
 private:
     friend struct MPEG2TSSource;
@@ -54,6 +57,10 @@
 
     sp<ATSParser> mParser;
 
+    // Used to remember the SyncEvent that occurred in feedMore() when called from
+    // init(), because init() needs to update |mSourceImpls| before adding a SyncPoint.
+    ATSParser::SyncEvent mLastSyncEvent;
+
     Vector<sp<AnotherPacketSource> > mSourceImpls;
 
     Vector<KeyedVector<int64_t, off64_t> > mSyncPoints;
@@ -64,7 +71,14 @@
     off64_t mOffset;
 
     void init();
-    status_t feedMore();
+    // Try to feed more data from the source to the parser.
+    // |isInit| means this function is called from inside init(). This is a signal
+    // to save the SyncEvent so that init() can add a SyncPoint after it updates
+    // |mSourceImpls|. This function returns OK if the expected amount of data was
+    // fed from the DataSource to the parser and successfully parsed. Otherwise it
+    // returns an error code, e.g. ERROR_END_OF_STREAM, no data available from the
+    // DataSource, or a syntax error in the data while parsing.
+    status_t feedMore(bool isInit = false);
     status_t seek(int64_t seekTimeUs,
             const MediaSource::ReadOptions::SeekMode& seekMode);
     status_t queueDiscontinuityForSeek(int64_t actualSeekTimeUs);
@@ -72,6 +86,9 @@
 
     status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
 
+    // Add a SyncPoint derived from |event|.
+    void addSyncPoint_l(const ATSParser::SyncEvent &event);
+
     DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
 };
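
Reviewer note — how the new |mLastSyncEvent| and feedMore(bool) are meant to
interact, as a simplified sketch (assumes SyncEvent exposes hasReturnedData() and
reset(), per the ATSParser change later in this patch; the loop condition is
illustrative):

    void MPEG2TSExtractor::init() {
        while (feedMore(true /* isInit */) == OK) {
            // ... discover streams and populate mSourceImpls ...
        }
        // Only now is it safe to record the sync point saved during parsing.
        if (mLastSyncEvent.hasReturnedData()) {
            addSyncPoint_l(mLastSyncEvent);
            mLastSyncEvent.reset();
        }
    }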
 
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 3067c3d..18b14e1 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -53,11 +53,12 @@
     MPEG4Extractor(const sp<DataSource> &source);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
     virtual uint32_t flags() const;
+    virtual const char * name() { return "MPEG4Extractor"; }
 
     // for DRM
     virtual char* getDrmTrackInfo(size_t trackID, int *len);
@@ -109,6 +110,7 @@
     status_t readMetaData();
     status_t parseChunk(off64_t *offset, int depth);
     status_t parseITunesMetaData(off64_t offset, size_t size);
+    status_t parseColorInfo(off64_t offset, size_t size);
     status_t parse3GPPMetaData(off64_t offset, size_t size, int depth);
     void parseID3v2MetaData(off64_t offset);
     status_t parseQTMetaKey(off64_t data_offset, size_t data_size);
diff --git a/media/libstagefright/include/MidiExtractor.h b/media/libstagefright/include/MidiExtractor.h
index 9a2abc0..5a7d90e 100644
--- a/media/libstagefright/include/MidiExtractor.h
+++ b/media/libstagefright/include/MidiExtractor.h
@@ -56,10 +56,11 @@
     MidiExtractor(const sp<DataSource> &source);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
+    virtual const char * name() { return "MidiExtractor"; }
 
 protected:
     virtual ~MidiExtractor();
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h
index a29bdf9..2639280 100644
--- a/media/libstagefright/include/NuCachedSource2.h
+++ b/media/libstagefright/include/NuCachedSource2.h
@@ -48,6 +48,10 @@
 
     virtual String8 getMIMEType() const;
 
+    virtual String8 toString() {
+        return mName;
+    }
+
     ////////////////////////////////////////////////////////////////////////////
 
     size_t cachedSize();
@@ -99,6 +103,7 @@
     sp<DataSource> mSource;
     sp<AHandlerReflector<NuCachedSource2> > mReflector;
     sp<ALooper> mLooper;
+    String8 mName;
 
     Mutex mSerializer;
     mutable Mutex mLock;
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index e7c4f6d..6c073f0 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -36,7 +36,9 @@
     virtual status_t listNodes(List<ComponentInfo> *list);
 
     virtual status_t allocateNode(
-            const char *name, const sp<IOMXObserver> &observer, node_id *node);
+            const char *name, const sp<IOMXObserver> &observer,
+            sp<IBinder> *nodeBinder,
+            node_id *node);
 
     virtual status_t freeNode(node_id node);
 
@@ -62,8 +64,8 @@
     virtual status_t getState(
             node_id node, OMX_STATETYPE* state);
 
-    virtual status_t enableGraphicBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable);
+    virtual status_t enableNativeBuffers(
+            node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable);
 
     virtual status_t getGraphicBufferUsage(
             node_id node, OMX_U32 port_index, OMX_U32* usage);
@@ -91,8 +93,12 @@
             node_id node, OMX_U32 port_index,
             const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
 
-    virtual status_t createInputSurface(
+    virtual status_t updateNativeHandleInMeta(
             node_id node, OMX_U32 port_index,
+            const sp<NativeHandle> &nativeHandle, buffer_id buffer);
+
+    virtual status_t createInputSurface(
+            node_id node, OMX_U32 port_index, android_dataspace dataSpace,
             sp<IGraphicBufferProducer> *bufferProducer,
             MetadataBufferType *type);
 
@@ -107,9 +113,9 @@
 
     virtual status_t signalEndOfInputStream(node_id node);
 
-    virtual status_t allocateBuffer(
+    virtual status_t allocateSecureBuffer(
             node_id node, OMX_U32 port_index, size_t size,
-            buffer_id *buffer, void **buffer_data);
+            buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle);
 
     virtual status_t allocateBufferWithBackup(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
@@ -166,13 +172,13 @@
 
     Mutex mLock;
     OMXMaster *mMaster;
-    int32_t mNodeCounter;
+    size_t mNodeCounter;
 
     KeyedVector<wp<IBinder>, OMXNodeInstance *> mLiveNodes;
     KeyedVector<node_id, OMXNodeInstance *> mNodeIDToInstance;
     KeyedVector<node_id, sp<CallbackDispatcher> > mDispatchers;
 
-    node_id makeNodeID(OMXNodeInstance *instance);
+    node_id makeNodeID_l(OMXNodeInstance *instance);
     OMXNodeInstance *findInstance(node_id node);
     sp<CallbackDispatcher> findDispatcher(node_id node);
 
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index babf5b7..56ab3f6 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -29,8 +29,6 @@
 struct OMXMaster;
 class GraphicBufferSource;
 
-status_t StatusFromOMXError(OMX_ERRORTYPE err);
-
 struct OMXNodeInstance {
     OMXNodeInstance(
             OMX *owner, const sp<IOMXObserver> &observer, const char *name);
@@ -54,7 +52,7 @@
 
     status_t getState(OMX_STATETYPE* state);
 
-    status_t enableGraphicBuffers(OMX_U32 portIndex, OMX_BOOL enable);
+    status_t enableNativeBuffers(OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable);
 
     status_t getGraphicBufferUsage(OMX_U32 portIndex, OMX_U32* usage);
 
@@ -81,8 +79,13 @@
             OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
             OMX::buffer_id buffer);
 
+    status_t updateNativeHandleInMeta(
+            OMX_U32 portIndex, const sp<NativeHandle> &nativeHandle,
+            OMX::buffer_id buffer);
+
     status_t createInputSurface(
-            OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer,
+            OMX_U32 portIndex, android_dataspace dataSpace,
+            sp<IGraphicBufferProducer> *bufferProducer,
             MetadataBufferType *type);
 
     static status_t createPersistentInputSurface(
@@ -95,9 +98,11 @@
 
     status_t signalEndOfInputStream();
 
-    status_t allocateBuffer(
+    void signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2);
+
+    status_t allocateSecureBuffer(
             OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
-            void **buffer_data);
+            void **buffer_data, sp<NativeHandle> *native_handle);
 
     status_t allocateBufferWithBackup(
             OMX_U32 portIndex, const sp<IMemory> &params,
@@ -165,7 +170,15 @@
     uint32_t mBufferIDCount;
     KeyedVector<OMX::buffer_id, OMX_BUFFERHEADERTYPE *> mBufferIDToBufferHeader;
     KeyedVector<OMX_BUFFERHEADERTYPE *, OMX::buffer_id> mBufferHeaderToBufferID;
+
+    // metadata and secure buffer type tracking
     MetadataBufferType mMetadataType[2];
+    enum SecureBufferType {
+        kSecureBufferTypeUnknown,
+        kSecureBufferTypeOpaque,
+        kSecureBufferTypeNativeHandle,
+    };
+    SecureBufferType mSecureBufferType[2];
 
     // For debug support
     char *mName;
@@ -228,9 +241,14 @@
             OMX_BUFFERHEADERTYPE *header,
             OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr, int fenceFd);
 
+    // Updates the graphic buffer handle in the metadata buffer for |buffer| and |header| to
+    // |graphicBuffer|'s handle. If |updateCodecBuffer| is true, the update will happen in
+    // the actual codec buffer. (Use this when the buffer will not be passed to the
+    // codec via emptyBuffer (the variant without _l) later, as only emptyBuffer
+    // copies the backup buffer into the codec buffer.)
     status_t updateGraphicBufferInMeta_l(
             OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
-            OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header);
+            OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header, bool updateCodecBuffer);
 
     status_t createGraphicBufferSource(
             OMX_U32 portIndex, sp<IGraphicBufferConsumer> consumer /* nullable */,
diff --git a/media/libstagefright/include/OggExtractor.h b/media/libstagefright/include/OggExtractor.h
index c647cbb..592c264 100644
--- a/media/libstagefright/include/OggExtractor.h
+++ b/media/libstagefright/include/OggExtractor.h
@@ -34,10 +34,11 @@
     OggExtractor(const sp<DataSource> &source);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
+    virtual const char * name() { return "OggExtractor"; }
 
 protected:
     virtual ~OggExtractor();
diff --git a/media/libstagefright/include/SampleIterator.h b/media/libstagefright/include/SampleIterator.h
index 7053247..2ef41ae 100644
--- a/media/libstagefright/include/SampleIterator.h
+++ b/media/libstagefright/include/SampleIterator.h
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+#ifndef SAMPLE_ITERATOR_H_
+
+#define SAMPLE_ITERATOR_H_
+
 #include <utils/Vector.h>
 
 namespace android {
@@ -75,3 +79,4 @@
 
 }  // namespace android
 
+#endif  // SAMPLE_ITERATOR_H_
diff --git a/media/libstagefright/include/SampleTable.h b/media/libstagefright/include/SampleTable.h
index 552eef7..2100ca6 100644
--- a/media/libstagefright/include/SampleTable.h
+++ b/media/libstagefright/include/SampleTable.h
@@ -123,7 +123,7 @@
     };
     SampleTimeEntry *mSampleTimeEntries;
 
-    uint32_t *mCompositionTimeDeltaEntries;
+    int32_t *mCompositionTimeDeltaEntries;
     size_t mNumCompositionTimeDeltaEntries;
     CompositionDeltaLookup *mCompositionDeltaLookup;
 
@@ -155,7 +155,7 @@
     }
 
     status_t getSampleSize_l(uint32_t sample_index, size_t *sample_size);
-    uint32_t getCompositionTimeOffset(uint32_t sampleIndex);
+    int32_t getCompositionTimeOffset(uint32_t sampleIndex);
 
     static int CompareIncreasingTime(const void *, const void *);
 
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index 757b308..258511a 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -54,6 +54,7 @@
     int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
     int32_t mCropWidth, mCropHeight;
     int32_t mRotationDegrees;
+    android_dataspace mDataSpace;
     FrameRenderTracker mRenderTracker;
 
     SoftwareRenderer(const SoftwareRenderer &);
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index fd739d0..b7ac718 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -18,9 +18,9 @@
 
 #define STAGEFRIGHT_METADATA_RETRIEVER_H_
 
+#include <media/IMediaExtractor.h>
 #include <media/MediaMetadataRetrieverInterface.h>
 
-#include <media/stagefright/OMXClient.h>
 #include <utils/KeyedVector.h>
 
 namespace android {
@@ -45,9 +45,8 @@
     virtual const char *extractMetadata(int keyCode);
 
 private:
-    OMXClient mClient;
     sp<DataSource> mSource;
-    sp<MediaExtractor> mExtractor;
+    sp<IMediaExtractor> mExtractor;
 
     bool mParsedMetaData;
     KeyedVector<int, String8> mMetaData;
diff --git a/media/libstagefright/include/TimedEventQueue.h b/media/libstagefright/include/TimedEventQueue.h
deleted file mode 100644
index 890f7e8..0000000
--- a/media/libstagefright/include/TimedEventQueue.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TIMED_EVENT_QUEUE_H_
-
-#define TIMED_EVENT_QUEUE_H_
-
-#include <pthread.h>
-
-#include <utils/List.h>
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-#include <powermanager/IPowerManager.h>
-
-namespace android {
-
-struct TimedEventQueue {
-
-    typedef int32_t event_id;
-
-    struct Event : public RefBase {
-        Event()
-            : mEventID(0) {
-        }
-
-        virtual ~Event() {}
-
-        event_id eventID() {
-            return mEventID;
-        }
-
-    protected:
-        virtual void fire(TimedEventQueue *queue, int64_t now_us) = 0;
-
-    private:
-        friend struct TimedEventQueue;
-
-        event_id mEventID;
-
-        void setEventID(event_id id) {
-            mEventID = id;
-        }
-
-        Event(const Event &);
-        Event &operator=(const Event &);
-    };
-
-    class PMDeathRecipient : public IBinder::DeathRecipient {
-    public:
-                    PMDeathRecipient(TimedEventQueue *queue) : mQueue(queue) {}
-        virtual     ~PMDeathRecipient() {}
-
-        // IBinder::DeathRecipient
-        virtual     void        binderDied(const wp<IBinder>& who);
-
-    private:
-                    PMDeathRecipient(const PMDeathRecipient&);
-                    PMDeathRecipient& operator = (const PMDeathRecipient&);
-
-                    TimedEventQueue *mQueue;
-    };
-
-    TimedEventQueue();
-    ~TimedEventQueue();
-
-    // Start executing the event loop.
-    void start();
-
-    // Stop executing the event loop, if flush is false, any pending
-    // events are discarded, otherwise the queue will stop (and this call
-    // return) once all pending events have been handled.
-    void stop(bool flush = false);
-
-    // Posts an event to the front of the queue (after all events that
-    // have previously been posted to the front but before timed events).
-    event_id postEvent(const sp<Event> &event);
-
-    event_id postEventToBack(const sp<Event> &event);
-
-    // It is an error to post an event with a negative delay.
-    event_id postEventWithDelay(const sp<Event> &event, int64_t delay_us);
-
-    // If the event is to be posted at a time that has already passed,
-    // it will fire as soon as possible.
-    event_id postTimedEvent(const sp<Event> &event, int64_t realtime_us);
-
-    // Returns true iff event is currently in the queue and has been
-    // successfully cancelled. In this case the event will have been
-    // removed from the queue and won't fire.
-    bool cancelEvent(event_id id);
-
-    // Cancel any pending event that satisfies the predicate.
-    // If stopAfterFirstMatch is true, only cancels the first event
-    // satisfying the predicate (if any).
-    void cancelEvents(
-            bool (*predicate)(void *cookie, const sp<Event> &event),
-            void *cookie,
-            bool stopAfterFirstMatch = false);
-
-    static int64_t getRealTimeUs();
-
-    void clearPowerManager();
-
-private:
-    struct QueueItem {
-        sp<Event> event;
-        int64_t realtime_us;
-        bool has_wakelock;
-    };
-
-    struct StopEvent : public TimedEventQueue::Event {
-        virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
-            queue->mStopped = true;
-        }
-    };
-
-    pthread_t mThread;
-    List<QueueItem> mQueue;
-    Mutex mLock;
-    Condition mQueueNotEmptyCondition;
-    Condition mQueueHeadChangedCondition;
-    event_id mNextEventID;
-
-    bool mRunning;
-    bool mStopped;
-
-    sp<IPowerManager>       mPowerManager;
-    sp<IBinder>             mWakeLockToken;
-    const sp<PMDeathRecipient> mDeathRecipient;
-    uint32_t                mWakeLockCount;
-
-    static void *ThreadWrapper(void *me);
-    void threadEntry();
-
-    sp<Event> removeEventFromQueue_l(event_id id, bool *wakeLocked);
-
-    void acquireWakeLock_l();
-    void releaseWakeLock_l(bool force = false);
-
-    TimedEventQueue(const TimedEventQueue &);
-    TimedEventQueue &operator=(const TimedEventQueue &);
-};
-
-}  // namespace android
-
-#endif  // TIMED_EVENT_QUEUE_H_
diff --git a/media/libstagefright/include/WAVExtractor.h b/media/libstagefright/include/WAVExtractor.h
index c567ccd..91ee870 100644
--- a/media/libstagefright/include/WAVExtractor.h
+++ b/media/libstagefright/include/WAVExtractor.h
@@ -33,10 +33,11 @@
     WAVExtractor(const sp<DataSource> &source);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
 
     virtual sp<MetaData> getMetaData();
+    virtual const char * name() { return "WAVExtractor"; }
 
 protected:
     virtual ~WAVExtractor();
diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h
index ab7e8b8..5b91072 100644
--- a/media/libstagefright/include/WVMExtractor.h
+++ b/media/libstagefright/include/WVMExtractor.h
@@ -46,7 +46,7 @@
     WVMExtractor(const sp<DataSource> &source);
 
     virtual size_t countTracks();
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
     virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
     virtual sp<MetaData> getMetaData();
     virtual void setUID(uid_t uid);
diff --git a/media/libstagefright/include/avc_utils.h b/media/libstagefright/include/avc_utils.h
index dafa07e..7465b35 100644
--- a/media/libstagefright/include/avc_utils.h
+++ b/media/libstagefright/include/avc_utils.h
@@ -47,8 +47,34 @@
         int32_t *width, int32_t *height,
         int32_t *sarWidth = NULL, int32_t *sarHeight = NULL);
 
+// Gets and returns an unsigned exp-golomb (ue) value from a bit reader |br|. Aborts if the value
+// is more than 64 bits long (>=0xFFFF (!)) or the bit reader overflows.
 unsigned parseUE(ABitReader *br);
 
+// Gets and returns a signed exp-golomb (se) value from a bit reader |br|. Aborts if the value is
+// more than 64 bits long (>0x7FFF || <-0x7FFF (!)) or the bit reader overflows.
+signed parseSE(ABitReader *br);
+
+// Gets an unsigned exp-golomb (ue) value from a bit reader |br|, and returns it if it was
+// successful. Returns |fallback| if it was unsuccessful. Note: if the value was longer than 64
+// bits, it reads past the value and still returns |fallback|.
+unsigned parseUEWithFallback(ABitReader *br, unsigned fallback);
+
+// Gets a signed exp-golomb (se) value from a bit reader |br|, and returns it if it was successful.
+// Returns |fallback| if it was unsuccessful. Note: if the value was longer than 64 bits, it reads
+// past the value and still returns |fallback|.
+signed parseSEWithFallback(ABitReader *br, signed fallback);
+
+// Skips an unsigned exp-golomb (ue) value from bit reader |br|.
+inline void skipUE(ABitReader *br) {
+    (void)parseUEWithFallback(br, 0U);
+}
+
+// Skips a signed exp-golomb (se) value from bit reader |br|.
+inline void skipSE(ABitReader *br) {
+    (void)parseSEWithFallback(br, 0);
+}
+
 status_t getNextNALUnit(
         const uint8_t **_data, size_t *_size,
         const uint8_t **nalStart, size_t *nalSize,
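
Reviewer note — exp-golomb recap for the helpers above: ue(v) encodes a value as n
leading zero bits, a terminating 1 bit, then n suffix bits. A minimal reference
decoder (illustrative, not the in-tree implementation, and without the overflow
checks the real parseUE performs):

    // Bit patterns: "1" -> 0, "010" -> 1, "011" -> 2, "00100" -> 3, ...
    unsigned decodeUE(ABitReader *br) {
        unsigned numZeroes = 0;
        while (br->getBits(1) == 0) {
            ++numZeroes;
        }
        unsigned suffix = (numZeroes > 0) ? br->getBits(numZeroes) : 0;
        return (1u << numZeroes) - 1 + suffix;
    }

    // se(v) then maps the unsigned code k to a signed value:
    // k = 1 -> 1, 2 -> -1, 3 -> 2, 4 -> -2, ... (positive for odd k).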
diff --git a/media/libstagefright/matroska/Android.mk b/media/libstagefright/matroska/Android.mk
index 1e8c2b2..b0cbf08 100644
--- a/media/libstagefright/matroska/Android.mk
+++ b/media/libstagefright/matroska/Android.mk
@@ -7,9 +7,11 @@
 LOCAL_C_INCLUDES:= \
         $(TOP)/external/libvpx/libwebm \
         $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/av/media/libstagefright/include \
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_MODULE:= libstagefright_matroska
 
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index ecc2573..0f9430e 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -19,9 +19,12 @@
 #include <utils/Log.h>
 
 #include "MatroskaExtractor.h"
+#include "avc_utils.h"
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBuffer.h>
@@ -144,12 +147,13 @@
     Type mType;
     bool mIsAudio;
     BlockIterator mBlockIter;
-    size_t mNALSizeLen;  // for type AVC
+    ssize_t mNALSizeLen;  // for type AVC
 
     List<MediaBuffer *> mPendingFrames;
 
     status_t advance();
 
+    status_t setWebmBlockCryptoInfo(MediaBuffer *mbuf);
     status_t readBlock();
     void clearPendingFrames();
 
@@ -213,7 +217,7 @@
       mBlockIter(mExtractor.get(),
                  mExtractor->mTracks.itemAt(index).mTrackNum,
                  index),
-      mNALSizeLen(0) {
+      mNALSizeLen(-1) {
     sp<MetaData> meta = mExtractor->mTracks.itemAt(index).mMeta;
 
     const char *mime;
@@ -227,13 +231,18 @@
         uint32_t dummy;
         const uint8_t *avcc;
         size_t avccSize;
-        CHECK(meta->findData(
-                    kKeyAVCC, &dummy, (const void **)&avcc, &avccSize));
-
-        CHECK_GE(avccSize, 5u);
-
-        mNALSizeLen = 1 + (avcc[4] & 3);
-        ALOGV("mNALSizeLen = %zu", mNALSizeLen);
+        int32_t nalSizeLen = 0;
+        if (meta->findInt32(kKeyNalLengthSize, &nalSizeLen)) {
+            if (nalSizeLen >= 0 && nalSizeLen <= 4) {
+                mNALSizeLen = nalSizeLen;
+            }
+        } else if (meta->findData(kKeyAVCC, &dummy, (const void **)&avcc, &avccSize)
+                && avccSize >= 5u) {
+            mNALSizeLen = 1 + (avcc[4] & 3);
+            ALOGV("mNALSizeLen = %zd", mNALSizeLen);
+        } else {
+            ALOGE("No mNALSizeLen");
+        }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
         mType = AAC;
     }
@@ -244,6 +253,10 @@
 }
 
 status_t MatroskaSource::start(MetaData * /* params */) {
+    if (mType == AVC && mNALSizeLen < 0) {
+        return ERROR_MALFORMED;
+    }
+
     mBlockIter.reset();
 
     return OK;
@@ -492,6 +505,9 @@
 }
 
 int64_t BlockIterator::blockTimeUs() const {
+    if (mCluster == NULL || mBlockEntry == NULL) {
+        return -1;
+    }
     return (mBlockEntry->GetBlock()->GetTime(mCluster) + 500ll) / 1000ll;
 }
 
@@ -511,6 +527,72 @@
     }
 }
 
+status_t MatroskaSource::setWebmBlockCryptoInfo(MediaBuffer *mbuf) {
+    if (mbuf->range_length() < 1 || mbuf->range_length() - 1 > INT32_MAX) {
+        // 1-byte signal
+        return ERROR_MALFORMED;
+    }
+
+    const uint8_t *data = (const uint8_t *)mbuf->data() + mbuf->range_offset();
+    bool blockEncrypted = data[0] & 0x1;
+    if (blockEncrypted && mbuf->range_length() < 9) {
+        // 1-byte signal + 8-byte IV
+        return ERROR_MALFORMED;
+    }
+
+    sp<MetaData> meta = mbuf->meta_data();
+    if (blockEncrypted) {
+        /*
+         *  0                   1                   2                   3
+         *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+         *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         *  |  Signal Byte  |                                               |
+         *  +-+-+-+-+-+-+-+-+             IV                                |
+         *  |                                                               |
+         *  |               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         *  |               |                                               |
+         *  |-+-+-+-+-+-+-+-+                                               |
+         *  :               Bytes 1..N of encrypted frame                   :
+         *  |                                                               |
+         *  |                                                               |
+         *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         */
+        int32_t plainSizes[] = { 0 };
+        int32_t encryptedSizes[] = { static_cast<int32_t>(mbuf->range_length() - 9) };
+        uint8_t ctrCounter[16] = { 0 };
+        uint32_t type;
+        const uint8_t *keyId;
+        size_t keyIdSize;
+        sp<MetaData> trackMeta = mExtractor->mTracks.itemAt(mTrackIndex).mMeta;
+        CHECK(trackMeta->findData(kKeyCryptoKey, &type, (const void **)&keyId, &keyIdSize));
+        meta->setData(kKeyCryptoKey, 0, keyId, keyIdSize);
+        memcpy(ctrCounter, data + 1, 8);
+        meta->setData(kKeyCryptoIV, 0, ctrCounter, 16);
+        meta->setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
+        meta->setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
+        mbuf->set_range(9, mbuf->range_length() - 9);
+    } else {
+        /*
+         *  0                   1                   2                   3
+         *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+         *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         *  |  Signal Byte  |                                               |
+         *  +-+-+-+-+-+-+-+-+                                               |
+         *  :               Bytes 1..N of unencrypted frame                 :
+         *  |                                                               |
+         *  |                                                               |
+         *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         */
+        int32_t plainSizes[] = { static_cast<int32_t>(mbuf->range_length() - 1) };
+        int32_t encryptedSizes[] = { 0 };
+        meta->setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
+        meta->setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
+        mbuf->set_range(1, mbuf->range_length() - 1);
+    }
+
+    return OK;
+}
+
 status_t MatroskaSource::readBlock() {
     CHECK(mPendingFrames.empty());
 
@@ -529,12 +611,19 @@
         mbuf->meta_data()->setInt64(kKeyTime, timeUs);
         mbuf->meta_data()->setInt32(kKeyIsSyncFrame, block->IsKey());
 
-        long n = frame.Read(mExtractor->mReader, (unsigned char *)mbuf->data());
-        if (n != 0) {
+        status_t err = frame.Read(mExtractor->mReader, static_cast<uint8_t *>(mbuf->data()));
+        if (err == OK
+                && mExtractor->mIsWebm
+                && mExtractor->mTracks.itemAt(mTrackIndex).mEncrypted) {
+            err = setWebmBlockCryptoInfo(mbuf);
+        }
+
+        if (err != OK) {
             mPendingFrames.clear();
 
             mBlockIter.advance();
-            return ERROR_IO;
+            mbuf->release();
+            return err;
         }
 
         mPendingFrames.push_back(mbuf);
@@ -581,7 +670,7 @@
     MediaBuffer *frame = *mPendingFrames.begin();
     mPendingFrames.erase(mPendingFrames.begin());
 
-    if (mType != AVC) {
+    if (mType != AVC || mNALSizeLen == 0) {
         if (targetSampleTimeUs >= 0ll) {
             frame->meta_data()->setInt64(
                     kKeyTargetTime, targetSampleTimeUs);
@@ -597,6 +686,9 @@
     // followed by a corresponding number of bytes containing the fragment.
     // We output all these fragments into a single large buffer separated
     // by startcodes (0x00 0x00 0x00 0x01).
+    //
+    // When mNALSizeLen is 0, we assume the data is already in the format
+    // desired.
 
     const uint8_t *srcPtr =
         (const uint8_t *)frame->data() + frame->range_offset();
@@ -633,9 +725,11 @@
             if (pass == 1) {
                 memcpy(&dstPtr[dstOffset], "\x00\x00\x00\x01", 4);
 
-                memcpy(&dstPtr[dstOffset + 4],
-                       &srcPtr[srcOffset + mNALSizeLen],
-                       NALsize);
+                if (frame != buffer) {
+                    memcpy(&dstPtr[dstOffset + 4],
+                           &srcPtr[srcOffset + mNALSizeLen],
+                           NALsize);
+                }
             }
 
             dstOffset += 4;  // 0x00 00 00 01
@@ -657,7 +751,13 @@
         if (pass == 0) {
             dstSize = dstOffset;
 
-            buffer = new MediaBuffer(dstSize);
+            if (dstSize == srcSize && mNALSizeLen == 4) {
+                // In this special case we can re-use the input buffer by substituting
+                // each 4-byte nal size with a 4-byte start code
+                buffer = frame;
+            } else {
+                buffer = new MediaBuffer(dstSize);
+            }
 
             int64_t timeUs;
             CHECK(frame->meta_data()->findInt64(kKeyTime, &timeUs));
@@ -671,8 +771,10 @@
         }
     }
 
-    frame->release();
-    frame = NULL;
+    if (frame != buffer) {
+        frame->release();
+        frame = NULL;
+    }
 
     if (targetSampleTimeUs >= 0ll) {
         buffer->meta_data()->setInt64(
@@ -761,7 +863,7 @@
     return mTracks.size();
 }
 
-sp<MediaSource> MatroskaExtractor::getTrack(size_t index) {
+sp<IMediaSource> MatroskaExtractor::getTrack(size_t index) {
     if (index >= mTracks.size()) {
         return NULL;
     }
@@ -928,6 +1030,140 @@
     return OK;
 }
 
+status_t MatroskaExtractor::synthesizeAVCC(TrackInfo *trackInfo, size_t index) {
+    BlockIterator iter(this, trackInfo->mTrackNum, index);
+    if (iter.eos()) {
+        return ERROR_MALFORMED;
+    }
+
+    const mkvparser::Block *block = iter.block();
+    if (block->GetFrameCount() <= 0) {
+        return ERROR_MALFORMED;
+    }
+
+    const mkvparser::Block::Frame &frame = block->GetFrame(0);
+    sp<ABuffer> abuf = new ABuffer(frame.len);
+    long n = frame.Read(mReader, abuf->data());
+    if (n != 0) {
+        return ERROR_MALFORMED;
+    }
+
+    sp<MetaData> avcMeta = MakeAVCCodecSpecificData(abuf);
+    if (avcMeta == NULL) {
+        return ERROR_MALFORMED;
+    }
+
+    // Override the synthesized nal length size, which is arbitrary
+    avcMeta->setInt32(kKeyNalLengthSize, 0);
+    trackInfo->mMeta = avcMeta;
+    return OK;
+}
+
+static inline bool isValidInt32ColourValue(long long value) {
+    return value != mkvparser::Colour::kValueNotPresent
+            && value >= INT32_MIN
+            && value <= INT32_MAX;
+}
+
+static inline bool isValidUint16ColourValue(long long value) {
+    return value != mkvparser::Colour::kValueNotPresent
+            && value >= 0
+            && value <= UINT16_MAX;
+}
+
+static inline bool isValidPrimary(const mkvparser::PrimaryChromaticity *primary) {
+    return primary != NULL && primary->x >= 0 && primary->x <= 1
+             && primary->y >= 0 && primary->y <= 1;
+}
+
+void MatroskaExtractor::getColorInformation(
+        const mkvparser::VideoTrack *vtrack, sp<MetaData> &meta) {
+    const mkvparser::Colour *color = vtrack->GetColour();
+    if (color == NULL) {
+        return;
+    }
+
+    // Color Aspects
+    {
+        int32_t primaries = 2; // ISO unspecified
+        int32_t transfer = 2; // ISO unspecified
+        int32_t coeffs = 2; // ISO unspecified
+        bool fullRange = false; // default
+        bool rangeSpecified = false;
+
+        if (isValidInt32ColourValue(color->primaries)) {
+            primaries = color->primaries;
+        }
+        if (isValidInt32ColourValue(color->transfer_characteristics)) {
+            transfer = color->transfer_characteristics;
+        }
+        if (isValidInt32ColourValue(color->matrix_coefficients)) {
+            coeffs = color->matrix_coefficients;
+        }
+        if (color->range != mkvparser::Colour::kValueNotPresent
+                && color->range != 0 /* MKV unspecified */) {
+            // We only support MKV broadcast range (== limited) and full range.
+            // We treat all other values as the default limited range.
+            fullRange = color->range == 2 /* MKV fullRange */;
+            rangeSpecified = true;
+        }
+
+        ColorAspects aspects;
+        ColorUtils::convertIsoColorAspectsToCodecAspects(
+                primaries, transfer, coeffs, fullRange, aspects);
+        meta->setInt32(kKeyColorPrimaries, aspects.mPrimaries);
+        meta->setInt32(kKeyTransferFunction, aspects.mTransfer);
+        meta->setInt32(kKeyColorMatrix, aspects.mMatrixCoeffs);
+        meta->setInt32(
+                kKeyColorRange, rangeSpecified ? aspects.mRange : ColorAspects::RangeUnspecified);
+    }
+
+    // HDR Static Info
+    {
+        HDRStaticInfo info, nullInfo; // nullInfo is a fully unspecified static info
+        memset(&info, 0, sizeof(info));
+        memset(&nullInfo, 0, sizeof(nullInfo));
+        if (isValidUint16ColourValue(color->max_cll)) {
+            info.sType1.mMaxContentLightLevel = color->max_cll;
+        }
+        if (isValidUint16ColourValue(color->max_fall)) {
+            info.sType1.mMaxFrameAverageLightLevel = color->max_fall;
+        }
+        const mkvparser::MasteringMetadata *mastering = color->mastering_metadata;
+        if (mastering != NULL) {
+            // Convert matroska values to HDRStaticInfo equivalent values for each fully specified
+            // group. See CTA-861.3 section 3.2.1 for more info.
+            if (mastering->luminance_max >= 0.5 && mastering->luminance_max < 65535.5) {
+                info.sType1.mMaxDisplayLuminance = (uint16_t)(mastering->luminance_max + 0.5);
+            }
+            if (mastering->luminance_min >= 0.00005 && mastering->luminance_min < 6.55355) {
+                // HDRStaticInfo Type1 stores min luminance scaled 10000:1
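+                // (e.g. a minimum of 0.005 cd/m^2 is stored as 50)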
+                info.sType1.mMinDisplayLuminance =
+                    (uint16_t)(10000 * mastering->luminance_min + 0.5);
+            }
+            // HDRStaticInfo Type1 stores primaries scaled 50000:1
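+            // (e.g. a D65 white point (x = 0.3127, y = 0.3290) is stored as (15635, 16450))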
+            if (isValidPrimary(mastering->white_point)) {
+                info.sType1.mW.x = (uint16_t)(50000 * mastering->white_point->x + 0.5);
+                info.sType1.mW.y = (uint16_t)(50000 * mastering->white_point->y + 0.5);
+            }
+            if (isValidPrimary(mastering->r) && isValidPrimary(mastering->g)
+                    && isValidPrimary(mastering->b)) {
+                info.sType1.mR.x = (uint16_t)(50000 * mastering->r->x + 0.5);
+                info.sType1.mR.y = (uint16_t)(50000 * mastering->r->y + 0.5);
+                info.sType1.mG.x = (uint16_t)(50000 * mastering->g->x + 0.5);
+                info.sType1.mG.y = (uint16_t)(50000 * mastering->g->y + 0.5);
+                info.sType1.mB.x = (uint16_t)(50000 * mastering->b->x + 0.5);
+                info.sType1.mB.y = (uint16_t)(50000 * mastering->b->y + 0.5);
+            }
+        }
+        // Only advertise static info if at least one of the groups has been specified.
+        if (memcmp(&info, &nullInfo, sizeof(info)) != 0) {
+            info.mID = HDRStaticInfo::kType1;
+            meta->setData(kKeyHdrStaticInfo, 'hdrS', &info, sizeof(info));
+        }
+    }
+}
+
 void MatroskaExtractor::addTracks() {
     const mkvparser::Tracks *tracks = mSegment->GetTracks();
 
@@ -983,6 +1219,13 @@
                     meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP8);
                 } else if (!strcmp("V_VP9", codecID)) {
                     meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP9);
+                    if (codecPrivateSize > 0) {
+                        // 'csd-0' for VP9 is the Blob of Codec Private data as
+                        // specified in http://www.webmproject.org/vp9/profiles/.
+                        meta->setData(
+                                kKeyVp9CodecPrivate, 0, codecPrivate,
+                                codecPrivateSize);
+                    }
                 } else {
                     ALOGW("%s is not supported.", codecID);
                     continue;
@@ -990,6 +1233,9 @@
 
                 meta->setInt32(kKeyWidth, vtrack->GetWidth());
                 meta->setInt32(kKeyHeight, vtrack->GetHeight());
+
+                getColorInformation(vtrack, meta);
+
                 break;
             }
 
@@ -1040,10 +1286,31 @@
         meta->setInt64(kKeyDuration, (durationNs + 500) / 1000);
 
         mTracks.push();
-        TrackInfo *trackInfo = &mTracks.editItemAt(mTracks.size() - 1);
+        size_t n = mTracks.size() - 1;
+        TrackInfo *trackInfo = &mTracks.editItemAt(n);
         trackInfo->mTrackNum = track->GetNumber();
         trackInfo->mMeta = meta;
         trackInfo->mExtractor = this;
+
+        trackInfo->mEncrypted = false;
+        for (size_t i = 0; i < track->GetContentEncodingCount() && !trackInfo->mEncrypted; i++) {
+            const mkvparser::ContentEncoding *encoding = track->GetContentEncodingByIndex(i);
+            for (size_t j = 0; j < encoding->GetEncryptionCount(); j++) {
+                const mkvparser::ContentEncoding::ContentEncryption *encryption;
+                encryption = encoding->GetEncryptionByIndex(j);
+                meta->setData(kKeyCryptoKey, 0, encryption->key_id, encryption->key_id_len);
+                trackInfo->mEncrypted = true;
+                break;
+            }
+        }
+
+        if (!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) {
+            // Attempt to recover from AVC track without codec private data
+            err = synthesizeAVCC(trackInfo, n);
+            if (err != OK) {
+                mTracks.pop();
+            }
+        }
     }
 }
 
diff --git a/media/libstagefright/matroska/MatroskaExtractor.h b/media/libstagefright/matroska/MatroskaExtractor.h
index db36bf8..665e68e 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.h
+++ b/media/libstagefright/matroska/MatroskaExtractor.h
@@ -18,7 +18,7 @@
 
 #define MATROSKA_EXTRACTOR_H_
 
-#include "mkvparser.hpp"
+#include "mkvparser/mkvparser.h"
 
 #include <media/stagefright/MediaExtractor.h>
 #include <utils/Vector.h>
@@ -29,6 +29,7 @@
 struct AMessage;
 class String8;
 
+class MetaData;
 struct DataSourceReader;
 struct MatroskaSource;
 
@@ -37,7 +38,7 @@
 
     virtual size_t countTracks();
 
-    virtual sp<MediaSource> getTrack(size_t index);
+    virtual sp<IMediaSource> getTrack(size_t index);
 
     virtual sp<MetaData> getTrackMetaData(
             size_t index, uint32_t flags);
@@ -46,6 +47,8 @@
 
     virtual uint32_t flags() const;
 
+    virtual const char * name() { return "MatroskaExtractor"; }
+
 protected:
     virtual ~MatroskaExtractor();
 
@@ -55,6 +58,7 @@
 
     struct TrackInfo {
         unsigned long mTrackNum;
+        bool mEncrypted;
         sp<MetaData> mMeta;
         const MatroskaExtractor *mExtractor;
         Vector<const mkvparser::CuePoint*> mCuePoints;
@@ -74,9 +78,10 @@
     bool mIsWebm;
     int64_t mSeekPreRollNs;
 
+    status_t synthesizeAVCC(TrackInfo *trackInfo, size_t index);
     void addTracks();
     void findThumbnails();
-
+    void getColorInformation(const mkvparser::VideoTrack *vtrack, sp<MetaData> &meta);
     bool isLiveStreaming() const;
 
     MatroskaExtractor(const MatroskaExtractor &);
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 2f2b115..b863d67 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -122,7 +122,7 @@
     void setPID(unsigned pid) { mElementaryPID = pid; }
 
     // Parse the payload and set event when PES with a sync frame is detected.
-    // This method knows when a PES starts; so record mPesStartOffset in that
+    // This method knows when a PES starts, so it records the offset in mPesStartOffsets in that
     // case.
     status_t parse(
             unsigned continuity_counter,
@@ -157,7 +157,7 @@
     bool mEOSReached;
 
     uint64_t mPrevPTS;
-    off64_t mPesStartOffset;
+    List<off64_t> mPesStartOffsets;
 
     ElementaryStreamQueue *mQueue;
 
@@ -205,16 +205,19 @@
 };
 
 ATSParser::SyncEvent::SyncEvent(off64_t offset)
-    : mInit(false), mOffset(offset), mTimeUs(0) {}
+    : mHasReturnedData(false), mOffset(offset), mTimeUs(0) {}
 
 void ATSParser::SyncEvent::init(off64_t offset, const sp<MediaSource> &source,
         int64_t timeUs) {
-    mInit = true;
+    mHasReturnedData = true;
     mOffset = offset;
     mMediaSource = source;
     mTimeUs = timeUs;
 }
 
+void ATSParser::SyncEvent::reset() {
+    mHasReturnedData = false;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 ATSParser::Program::Program(
@@ -509,7 +512,7 @@
         mLastRecoveredPTS = static_cast<int64_t>(PTS_33bit);
     } else {
         mLastRecoveredPTS = static_cast<int64_t>(
-                ((mLastRecoveredPTS - PTS_33bit + 0x100000000ll)
+                ((mLastRecoveredPTS - static_cast<int64_t>(PTS_33bit) + 0x100000000ll)
                 & 0xfffffffe00000000ull) | PTS_33bit);
         // We start from 0, but recovered PTS could be slightly below 0.
         // Clamp it to 0 as rest of the pipeline doesn't take negative pts.
@@ -524,15 +527,10 @@
 }
 
 sp<MediaSource> ATSParser::Program::getSource(SourceType type) {
-    size_t index = (type == AUDIO) ? 0 : 0;
-
     for (size_t i = 0; i < mStreams.size(); ++i) {
         sp<MediaSource> source = mStreams.editValueAt(i)->getSource(type);
         if (source != NULL) {
-            if (index == 0) {
-                return source;
-            }
-            --index;
+            return source;
         }
     }
 
@@ -546,6 +544,8 @@
             return true;
         } else if (type == VIDEO && stream->isVideo()) {
             return true;
+        } else if (type == META && stream->isMeta()) {
+            return true;
         }
     }
 
@@ -664,6 +664,7 @@
         ALOGI("discontinuity on stream pid 0x%04x", mElementaryPID);
 
         mPayloadStarted = false;
+        mPesStartOffsets.clear();
         mBuffer->setRange(0, 0);
         mExpectedContinuityCounter = -1;
 
@@ -700,7 +701,7 @@
         }
 
         mPayloadStarted = true;
-        mPesStartOffset = offset;
+        mPesStartOffsets.push_back(offset);
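+        // PES start offsets queue up here and are consumed FIFO in parsePES()
+        // when each timed access unit is emitted.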
     }
 
     if (!mPayloadStarted) {
@@ -775,6 +776,7 @@
     }
 
     mPayloadStarted = false;
+    mPesStartOffsets.clear();
     mEOSReached = false;
     mBuffer->setRange(0, 0);
 
@@ -1108,7 +1110,9 @@
                 int64_t timeUs;
                 if (accessUnit->meta()->findInt64("timeUs", &timeUs)) {
                     found = true;
-                    event->init(mPesStartOffset, mSource, timeUs);
+                    off64_t pesStartOffset = *mPesStartOffsets.begin();
+                    event->init(pesStartOffset, mSource, timeUs);
+                    mPesStartOffsets.erase(mPesStartOffsets.begin());
                 }
             }
         }
@@ -1434,8 +1438,8 @@
 
             // The number of bytes received by this parser up to and
             // including the final byte of this PCR_ext field.
-            size_t byteOffsetFromStart =
-                mNumTSPacketsParsed * 188 + byteOffsetFromStartOfTSPacket;
+            uint64_t byteOffsetFromStart =
+                uint64_t(mNumTSPacketsParsed) * 188 + byteOffsetFromStartOfTSPacket;
 
             for (size_t i = 0; i < mPrograms.size(); ++i) {
                 updatePCR(PID, PCR, byteOffsetFromStart);
@@ -1499,23 +1503,38 @@
 }
 
 sp<MediaSource> ATSParser::getSource(SourceType type) {
-    int which = -1;  // any
-
+    sp<MediaSource> firstSourceFound;
     for (size_t i = 0; i < mPrograms.size(); ++i) {
         const sp<Program> &program = mPrograms.editItemAt(i);
-
-        if (which >= 0 && (int)program->number() != which) {
+        sp<MediaSource> source = program->getSource(type);
+        if (source == NULL) {
             continue;
         }
+        if (firstSourceFound == NULL) {
+            firstSourceFound = source;
+        }
+        // Prefer programs with both audio/video
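+        // so that audio and video handed to the player come from the same
+        // program; if no program has both, fall back to the first source found.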
+        switch (type) {
+            case VIDEO: {
+                if (program->hasSource(AUDIO)) {
+                    return source;
+                }
+                break;
+            }
 
-        sp<MediaSource> source = program->getSource(type);
+            case AUDIO: {
+                if (program->hasSource(VIDEO)) {
+                    return source;
+                }
+                break;
+            }
 
-        if (source != NULL) {
-            return source;
+            default:
+                return source;
         }
     }
 
-    return NULL;
+    return firstSourceFound;
 }
 
 bool ATSParser::hasSource(SourceType type) const {
@@ -1537,9 +1556,10 @@
     return mPrograms.editItemAt(0)->PTSTimeDeltaEstablished();
 }
 
+__attribute__((no_sanitize("integer")))
 void ATSParser::updatePCR(
-        unsigned /* PID */, uint64_t PCR, size_t byteOffsetFromStart) {
-    ALOGV("PCR 0x%016" PRIx64 " @ %zu", PCR, byteOffsetFromStart);
+        unsigned /* PID */, uint64_t PCR, uint64_t byteOffsetFromStart) {
+    ALOGV("PCR 0x%016" PRIx64 " @ %" PRIx64, PCR, byteOffsetFromStart);
 
     if (mNumPCRs == 2) {
         mPCR[0] = mPCR[1];
@@ -1555,6 +1575,7 @@
     ++mNumPCRs;
 
     if (mNumPCRs == 2) {
+        /* The unsigned PCR/byte-count deltas below may wrap around; the
+           no_sanitize("integer") attribute on this function covers this. */
         double transportRate =
             (mPCRBytes[1] - mPCRBytes[0]) * 27E6 / (mPCR[1] - mPCR[0]);
 
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 430a8d5..9d9102d 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -69,16 +69,18 @@
         void init(off64_t offset, const sp<MediaSource> &source,
                 int64_t timeUs);
 
-        bool isInit() { return mInit; }
-        off64_t getOffset() { return mOffset; }
-        const sp<MediaSource> &getMediaSource() { return mMediaSource; }
-        int64_t getTimeUs() { return mTimeUs; }
+        bool hasReturnedData() const { return mHasReturnedData; }
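+        // Invalidates any previously returned data so the event can be reused.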
+        void reset();
+        off64_t getOffset() const { return mOffset; }
+        const sp<MediaSource> &getMediaSource() const { return mMediaSource; }
+        int64_t getTimeUs() const { return mTimeUs; }
 
     private:
-        bool mInit;
+        bool mHasReturnedData;
         /*
-         * mInit == false: the current offset
-         * mInit == true: the start offset of sync payload
+         * mHasReturnedData == false: the current offset (or undefined if the returned
+         *                            data has been invalidated via reset())
+         * mHasReturnedData == true: the start offset of sync payload
          */
         off64_t mOffset;
         /* The media source object for this event. */
@@ -180,10 +182,10 @@
     // see feedTSPacket().
     status_t parseTS(ABitReader *br, SyncEvent *event);
 
-    void updatePCR(unsigned PID, uint64_t PCR, size_t byteOffsetFromStart);
+    void updatePCR(unsigned PID, uint64_t PCR, uint64_t byteOffsetFromStart);
 
     uint64_t mPCR[2];
-    size_t mPCRBytes[2];
+    uint64_t mPCRBytes[2];
     int64_t mSystemTimeUs[2];
     size_t mNumPCRs;
 
diff --git a/media/libstagefright/mpeg2ts/Android.mk b/media/libstagefright/mpeg2ts/Android.mk
index 16b0160..70afde9 100644
--- a/media/libstagefright/mpeg2ts/Android.mk
+++ b/media/libstagefright/mpeg2ts/Android.mk
@@ -15,6 +15,7 @@
 
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_MODULE:= libstagefright_mpeg2ts
 
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index cabde32..4fcf7b5 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -216,6 +216,12 @@
             mediaBuffer->meta_data()->setData(kKeySEI, 0, sei->data(), sei->size());
         }
 
+        sp<ABuffer> mpegUserData;
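+        // The ES queue may attach MPEG user data (which can carry, e.g.,
+        // CEA-608/708 captions); forward it in the media buffer's meta data.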
+        if (buffer->meta()->findBuffer("mpegUserData", &mpegUserData) && mpegUserData != NULL) {
+            mediaBuffer->meta_data()->setData(
+                    kKeyMpegUserData, 0, mpegUserData->data(), mpegUserData->size());
+        }
+
         *out = mediaBuffer;
         return OK;
     }
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 36ec367..96ca405 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -592,6 +592,7 @@
         mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
         mFormat->setInt32(kKeyChannelCount, 2);
         mFormat->setInt32(kKeySampleRate, 48000);
+        mFormat->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
     }
 
     static const size_t kFramesPerAU = 80;
@@ -1047,6 +1048,8 @@
     const uint8_t *data = mBuffer->data();
     size_t size = mBuffer->size();
 
+    Vector<size_t> userDataPositions;
+
     bool sawPictureStart = false;
     int pprevStartCode = -1;
     int prevStartCode = -1;
@@ -1121,11 +1124,19 @@
 
         if (mFormat != NULL && currentStartCode == 0xb8) {
             // GOP layer
+            if (offset + 7 >= size) {
+                ALOGE("Size too small");
+                return NULL;
+            }
             gopFound = true;
             isClosedGop = (data[offset + 7] & 0x40) != 0;
             brokenLink = (data[offset + 7] & 0x20) != 0;
         }
 
+        if (mFormat != NULL && currentStartCode == 0xb2) {
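+            // 0xb2 is the user_data start code (ISO/IEC 13818-2); remember
+            // where each user_data section begins.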
+            userDataPositions.add(offset);
+        }
+
         if (mFormat != NULL && currentStartCode == 0x00) {
             // Picture start
 
@@ -1159,6 +1170,19 @@
 
                 // hexdump(accessUnit->data(), accessUnit->size());
 
+                if (userDataPositions.size() > 0) {
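+                    // Flatten the recorded user_data offsets into a buffer of
+                    // size_t entries attached to the access unit's meta.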
+                    sp<ABuffer> mpegUserData =
+                        new ABuffer(userDataPositions.size() * sizeof(size_t));
+                    if (mpegUserData != NULL && mpegUserData->data() != NULL) {
+                        for (size_t i = 0; i < userDataPositions.size(); ++i) {
+                            memcpy(
+                                    mpegUserData->data() + i * sizeof(size_t),
+                                    &userDataPositions[i], sizeof(size_t));
+                        }
+                        accessUnit->meta()->setBuffer("mpegUserData", mpegUserData);
+                    }
+                }
+
                 return accessUnit;
             }
         }
diff --git a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
index 6d9fe9d..078a5f0 100644
--- a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
@@ -108,7 +108,8 @@
     }
 
     // Remove all tracks that were unable to determine their format.
-    for (size_t i = mTracks.size(); i-- > 0;) {
+    for (size_t i = mTracks.size(); i > 0;) {
+        i--;
         if (mTracks.valueAt(i)->getFormat() == NULL) {
             mTracks.removeItemsAt(i);
         }
@@ -124,7 +125,7 @@
     return mTracks.size();
 }
 
-sp<MediaSource> MPEG2PSExtractor::getTrack(size_t index) {
+sp<IMediaSource> MPEG2PSExtractor::getTrack(size_t index) {
     if (index >= mTracks.size()) {
         return NULL;
     }
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index cbe9673..fb5e079 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -112,6 +112,7 @@
 MPEG2TSExtractor::MPEG2TSExtractor(const sp<DataSource> &source)
     : mDataSource(source),
       mParser(new ATSParser),
+      mLastSyncEvent(0),
       mOffset(0) {
     init();
 }
@@ -120,7 +121,7 @@
     return mSourceImpls.size();
 }
 
-sp<MediaSource> MPEG2TSExtractor::getTrack(size_t index) {
+sp<IMediaSource> MPEG2TSExtractor::getTrack(size_t index) {
     if (index >= mSourceImpls.size()) {
         return NULL;
     }
@@ -149,8 +150,10 @@
     bool haveVideo = false;
     int64_t startTime = ALooper::GetNowUs();
 
-    while (feedMore() == OK) {
+    while (feedMore(true /* isInit */) == OK) {
         if (haveAudio && haveVideo) {
+            addSyncPoint_l(mLastSyncEvent);
+            mLastSyncEvent.reset();
             break;
         }
         if (!haveVideo) {
@@ -181,6 +184,9 @@
             }
         }
 
+        addSyncPoint_l(mLastSyncEvent);
+        mLastSyncEvent.reset();
+
         // Wait only for 2 seconds to detect audio/video streams.
         if (ALooper::GetNowUs() - startTime > 2000000ll) {
             break;
@@ -245,7 +251,7 @@
             haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
 }
 
-status_t MPEG2TSExtractor::feedMore() {
+status_t MPEG2TSExtractor::feedMore(bool isInit) {
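+    // During init(), defer adding the sync point (via mLastSyncEvent) until the
+    // corresponding track source exists in mSourceImpls; see addSyncPoint_l().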
     Mutex::Autolock autoLock(mLock);
 
     uint8_t packet[kTSPacketSize];
@@ -261,29 +267,41 @@
     ATSParser::SyncEvent event(mOffset);
     mOffset += n;
     status_t err = mParser->feedTSPacket(packet, kTSPacketSize, &event);
-    if (event.isInit()) {
-        for (size_t i = 0; i < mSourceImpls.size(); ++i) {
-            if (mSourceImpls[i].get() == event.getMediaSource().get()) {
-                KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
-                syncPoints->add(event.getTimeUs(), event.getOffset());
-                // We're keeping the size of the sync points at most 5mb per a track.
-                size_t size = syncPoints->size();
-                if (size >= 327680) {
-                    int64_t firstTimeUs = syncPoints->keyAt(0);
-                    int64_t lastTimeUs = syncPoints->keyAt(size - 1);
-                    if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
-                        syncPoints->removeItemsAt(0, 4096);
-                    } else {
-                        syncPoints->removeItemsAt(size - 4096, 4096);
-                    }
-                }
-                break;
-            }
+    if (event.hasReturnedData()) {
+        if (isInit) {
+            mLastSyncEvent = event;
+        } else {
+            addSyncPoint_l(event);
         }
     }
     return err;
 }
 
+void MPEG2TSExtractor::addSyncPoint_l(const ATSParser::SyncEvent &event) {
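+    // Record a (time -> byte offset) pair for the source that produced this
+    // sync frame; the table is used when resolving seeks.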
+    if (!event.hasReturnedData()) {
+        return;
+    }
+
+    for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+        if (mSourceImpls[i].get() == event.getMediaSource().get()) {
+            KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
+            syncPoints->add(event.getTimeUs(), event.getOffset());
+            // Cap the sync point table at roughly 5 MB per track
+            // (327680 entries of 16 bytes each).
+            size_t size = syncPoints->size();
+            if (size >= 327680) {
+                int64_t firstTimeUs = syncPoints->keyAt(0);
+                int64_t lastTimeUs = syncPoints->keyAt(size - 1);
+                if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
+                    syncPoints->removeItemsAt(0, 4096);
+                } else {
+                    syncPoints->removeItemsAt(size - 4096, 4096);
+                }
+            }
+            break;
+        }
+    }
+}
+
 uint32_t MPEG2TSExtractor::flags() const {
     return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
 }
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index 5f0f567..e4fbd81 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -7,6 +7,7 @@
         OMX.cpp                       \
         OMXMaster.cpp                 \
         OMXNodeInstance.cpp           \
+        OMXUtils.cpp                  \
         SimpleSoftOMXComponent.cpp    \
         SoftOMXComponent.cpp          \
         SoftOMXPlugin.cpp             \
@@ -33,6 +34,7 @@
 LOCAL_MODULE:= libstagefright_omx
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 1a7dc9d..995e50e 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -20,12 +20,16 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#define STRINGIFY_ENUMS // for asString in HardwareAPI.h/VideoAPI.h
+
 #include "GraphicBufferSource.h"
+#include "OMXUtils.h"
 
 #include <OMX_Core.h>
 #include <OMX_IndexExt.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 
 #include <media/hardware/MetadataBufferType.h>
 #include <ui/GraphicBuffer.h>
@@ -39,6 +43,8 @@
 
 static const bool EXTRA_CHECK = true;
 
+static const OMX_U32 kPortIndexInput = 0;
+
 GraphicBufferSource::PersistentProxyListener::PersistentProxyListener(
         const wp<IGraphicBufferConsumer> &consumer,
         const wp<ConsumerListener>& consumerListener) :
@@ -64,19 +70,19 @@
             return;
         }
 
-        err = consumer->detachBuffer(bi.mBuf);
+        err = consumer->detachBuffer(bi.mSlot);
         if (err != OK) {
             ALOGE("PersistentProxyListener: detachBuffer failed (%d)", err);
             return;
         }
 
-        err = consumer->attachBuffer(&bi.mBuf, bi.mGraphicBuffer);
+        err = consumer->attachBuffer(&bi.mSlot, bi.mGraphicBuffer);
         if (err != OK) {
             ALOGE("PersistentProxyListener: attachBuffer failed (%d)", err);
             return;
         }
 
-        err = consumer->releaseBuffer(bi.mBuf, 0,
+        err = consumer->releaseBuffer(bi.mSlot, 0,
                 EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, bi.mFence);
         if (err != OK) {
             ALOGE("PersistentProxyListener: releaseBuffer failed (%d)", err);
@@ -117,6 +123,7 @@
     mNodeInstance(nodeInstance),
     mExecuting(false),
     mSuspended(false),
+    mLastDataSpace(HAL_DATASPACE_UNKNOWN),
     mIsPersistent(false),
     mConsumer(consumer),
     mNumFramesAvailable(0),
@@ -189,6 +196,8 @@
         return;
     }
 
+    memset(&mColorAspects, 0, sizeof(mColorAspects));
+
     CHECK(mInitCheck == NO_ERROR);
 }
 
@@ -215,6 +224,8 @@
             mNumFramesAvailable, mCodecBuffers.size());
     CHECK(!mExecuting);
     mExecuting = true;
+    mLastDataSpace = HAL_DATASPACE_UNKNOWN;
+    ALOGV("clearing last dataSpace");
 
     // Start by loading up as many buffers as possible.  We want to do this,
     // rather than just submit the first buffer, to avoid a degenerate case:
@@ -382,7 +393,7 @@
     // Find matching entry in our cached copy of the BufferQueue slots.
     // If we find a match, release that slot.  If we don't, the BufferQueue
     // has dropped that GraphicBuffer, and there's nothing for us to release.
-    int id = codecBuffer.mBuf;
+    int id = codecBuffer.mSlot;
     sp<Fence> fence = new Fence(fenceFd);
     if (mBufferSlot[id] != NULL &&
         mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
@@ -476,7 +487,7 @@
             ++mNumBufferAcquired;
             --mNumFramesAvailable;
 
-            releaseBuffer(item.mBuf, item.mFrameNumber,
+            releaseBuffer(item.mSlot, item.mFrameNumber,
                     item.mGraphicBuffer, item.mFence);
         }
         return;
@@ -495,6 +506,76 @@
     }
 }
 
+void GraphicBufferSource::onDataSpaceChanged_l(
+        android_dataspace dataSpace, android_pixel_format pixelFormat) {
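+    // Negotiates color aspects with the codec for the new dataspace and then
+    // signals OMX_EventDataSpaceChanged to the client. Caller holds mMutex.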
+    ALOGD("got buffer with new dataSpace #%x", dataSpace);
+    mLastDataSpace = dataSpace;
+
+    if (ColorUtils::convertDataSpaceToV0(dataSpace)) {
+        ColorAspects aspects = mColorAspects; // initially requested aspects
+
+        // request color aspects to encode
+        OMX_INDEXTYPE index;
+        status_t err = mNodeInstance->getExtensionIndex(
+                "OMX.google.android.index.describeColorAspects", &index);
+        if (err == OK) {
+            // V0 dataspace
+            DescribeColorAspectsParams params;
+            InitOMXParams(&params);
+            params.nPortIndex = kPortIndexInput;
+            params.nDataSpace = mLastDataSpace;
+            params.nPixelFormat = pixelFormat;
+            params.bDataSpaceChanged = OMX_TRUE;
+            params.sAspects = mColorAspects;
+
+            err = mNodeInstance->getConfig(index, &params, sizeof(params));
+            if (err == OK) {
+                aspects = params.sAspects;
+                ALOGD("Codec resolved it to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+                        params.sAspects.mRange, asString(params.sAspects.mRange),
+                        params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+                        params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+                        params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+                        err, asString(err));
+            } else {
+                params.sAspects = aspects;
+                err = OK;
+            }
+            params.bDataSpaceChanged = OMX_FALSE;
+            for (int triesLeft = 2; --triesLeft >= 0; ) {
+                err = mNodeInstance->setConfig(index, &params, sizeof(params));
+                if (err == OK) {
+                    err = mNodeInstance->getConfig(index, &params, sizeof(params));
+                }
+                if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+                        params.sAspects, aspects)) {
+                    // if we can't set or get color aspects, still communicate dataspace to client
+                    break;
+                }
+
+                ALOGW_IF(triesLeft == 0, "Codec repeatedly changed requested ColorAspects.");
+            }
+        }
+
+        ALOGV("Set color aspects to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+                aspects.mRange, asString(aspects.mRange),
+                aspects.mPrimaries, asString(aspects.mPrimaries),
+                aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+                aspects.mTransfer, asString(aspects.mTransfer),
+                err, asString(err));
+
+        // signal client that the dataspace has changed; this will update the output format
+        // TODO: we should tie this to an output buffer somehow, and signal the change
+        // just before the output buffer is returned to the client, but there are many
+        // ways this could fail (e.g. flushing), and we are not yet supporting this scenario.
+
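+        // nData2 packs the resolved aspects as
+        // (range << 24) | (primaries << 16) | (matrixCoeffs << 8) | transfer.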
+        mNodeInstance->signalEvent(
+                OMX_EventDataSpaceChanged, dataSpace,
+                (aspects.mRange << 24) | (aspects.mPrimaries << 16)
+                        | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer);
+    }
+}
+
 bool GraphicBufferSource::fillCodecBuffer_l() {
     CHECK(mExecuting && mNumFramesAvailable > 0);
 
@@ -530,10 +611,16 @@
     // If this is the first time we're seeing this buffer, add it to our
     // slot table.
     if (item.mGraphicBuffer != NULL) {
-        ALOGV("fillCodecBuffer_l: setting mBufferSlot %d", item.mBuf);
-        mBufferSlot[item.mBuf] = item.mGraphicBuffer;
+        ALOGV("fillCodecBuffer_l: setting mBufferSlot %d", item.mSlot);
+        mBufferSlot[item.mSlot] = item.mGraphicBuffer;
     }
 
+    if (item.mDataSpace != mLastDataSpace) {
+        onDataSpaceChanged_l(
+                item.mDataSpace, (android_pixel_format)mBufferSlot[item.mSlot]->getPixelFormat());
+    }
+
     err = UNKNOWN_ERROR;
 
     // only submit sample if start time is unspecified, or sample
@@ -557,10 +644,10 @@
     }
 
     if (err != OK) {
-        ALOGV("submitBuffer_l failed, releasing bq buf %d", item.mBuf);
-        releaseBuffer(item.mBuf, item.mFrameNumber, item.mGraphicBuffer, item.mFence);
+        ALOGV("submitBuffer_l failed, releasing bq slot %d", item.mSlot);
+        releaseBuffer(item.mSlot, item.mFrameNumber, item.mGraphicBuffer, item.mFence);
     } else {
-        ALOGV("buffer submitted (bq %d, cbi %d)", item.mBuf, cbi);
+        ALOGV("buffer submitted (bq %d, cbi %d)", item.mSlot, cbi);
         setLatestBuffer_l(item, dropped);
     }
 
@@ -600,7 +687,7 @@
     }
 
     BufferItem item;
-    item.mBuf = mLatestBufferId;
+    item.mSlot = mLatestBufferId;
     item.mFrameNumber = mLatestBufferFrameNum;
     item.mTimestamp = mRepeatLastFrameTimestamp;
     item.mFence = mLatestBufferFence;
@@ -642,7 +729,7 @@
         }
     }
 
-    mLatestBufferId = item.mBuf;
+    mLatestBufferId = item.mSlot;
     mLatestBufferFrameNum = item.mFrameNumber;
     mRepeatLastFrameTimestamp = item.mTimestamp + mRepeatAfterUs * 1000;
 
@@ -754,8 +841,8 @@
     }
 
     CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
-    codecBuffer.mGraphicBuffer = mBufferSlot[item.mBuf];
-    codecBuffer.mBuf = item.mBuf;
+    codecBuffer.mGraphicBuffer = mBufferSlot[item.mSlot];
+    codecBuffer.mSlot = item.mSlot;
     codecBuffer.mFrameNumber = item.mFrameNumber;
 
     OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
@@ -880,11 +967,11 @@
             // If this is the first time we're seeing this buffer, add it to our
             // slot table.
             if (item.mGraphicBuffer != NULL) {
-                ALOGV("onFrameAvailable: setting mBufferSlot %d", item.mBuf);
-                mBufferSlot[item.mBuf] = item.mGraphicBuffer;
+                ALOGV("onFrameAvailable: setting mBufferSlot %d", item.mSlot);
+                mBufferSlot[item.mSlot] = item.mGraphicBuffer;
             }
 
-            releaseBuffer(item.mBuf, item.mFrameNumber,
+            releaseBuffer(item.mSlot, item.mFrameNumber,
                     item.mGraphicBuffer, item.mFence);
         }
         return;
@@ -925,6 +1012,13 @@
     ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
 }
 
+void GraphicBufferSource::setDefaultDataSpace(android_dataspace dataSpace) {
+    // no need for mutex as we are not yet running
+    ALOGD("setting dataspace: %#x", dataSpace);
+    mConsumer->setDefaultBufferDataSpace(dataSpace);
+    mLastDataSpace = dataSpace;
+}
+
 status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(
         int64_t repeatAfterUs) {
     Mutex::Autolock autoLock(mMutex);
@@ -974,19 +1068,29 @@
             (skipFramesBeforeUs > 0) ? (skipFramesBeforeUs * 1000) : -1ll;
 }
 
-status_t GraphicBufferSource::setTimeLapseUs(int64_t* data) {
+status_t GraphicBufferSource::setTimeLapseConfig(const TimeLapseConfig &config) {
     Mutex::Autolock autoLock(mMutex);
 
-    if (mExecuting || data[0] <= 0ll || data[1] <= 0ll) {
+    if (mExecuting || config.mTimePerFrameUs <= 0ll || config.mTimePerCaptureUs <= 0ll) {
         return INVALID_OPERATION;
     }
 
-    mTimePerFrameUs = data[0];
-    mTimePerCaptureUs = data[1];
+    mTimePerFrameUs = config.mTimePerFrameUs;
+    mTimePerCaptureUs = config.mTimePerCaptureUs;
 
     return OK;
 }
 
+void GraphicBufferSource::setColorAspects(const ColorAspects &aspects) {
+    Mutex::Autolock autoLock(mMutex);
+    mColorAspects = aspects;
+    ALOGD("requesting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s))",
+            aspects.mRange, asString(aspects.mRange),
+            aspects.mPrimaries, asString(aspects.mPrimaries),
+            aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+            aspects.mTransfer, asString(aspects.mTransfer));
+}
+
 void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatRepeatLastFrame:
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 2f929d9..c8b0e62 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -23,6 +23,7 @@
 #include <utils/RefBase.h>
 
 #include <OMX_Core.h>
+#include <VideoAPI.h>
 #include "../include/OMXNodeInstance.h"
 #include <media/stagefright/foundation/ABase.h>
 #include <media/stagefright/foundation/AHandlerReflector.h>
@@ -73,6 +74,9 @@
         return mProducer;
     }
 
+    // Sets the default buffer data space
+    void setDefaultDataSpace(android_dataspace dataSpace);
+
     // This is called when OMX transitions to OMX_StateExecuting, which means
     // we can start handing it buffers.  If we already have buffers of data
     // sitting in the BufferQueue, this will send them to the codec.
@@ -130,17 +134,23 @@
     // When set, the max frame rate fed to the encoder will be capped at maxFps.
     status_t setMaxFps(float maxFps);
 
+    struct TimeLapseConfig {
+        int64_t mTimePerFrameUs;   // the time (us) between two frames for playback
+        int64_t mTimePerCaptureUs; // the time (us) between two frames for capture
+    };
+
     // Sets the time lapse (or slow motion) parameters.
-    // data[0] is the time (us) between two frames for playback
-    // data[1] is the time (us) between two frames for capture
     // When set, the sample's timestamp will be modified to playback framerate,
     // and capture timestamp will be modified to capture rate.
-    status_t setTimeLapseUs(int64_t* data);
+    status_t setTimeLapseConfig(const TimeLapseConfig &config);
 
     // Sets the start time us (in system time), samples before which should
     // be dropped and not submitted to encoder
     void setSkipFramesBeforeUs(int64_t startTimeUs);
 
+    // Sets the desired color aspects, e.g. to be used when producer does not specify a dataspace.
+    void setColorAspects(const ColorAspects &aspects);
+
 protected:
     // BufferQueue::ConsumerListener interface, called when a new frame of
     // data is available.  If we're executing and a codec buffer is
@@ -195,7 +205,7 @@
         uint64_t mFrameNumber;
 
         // buffer producer's buffer slot for buffer
-        int mBuf;
+        int mSlot;
 
         sp<GraphicBuffer> mGraphicBuffer;
     };
@@ -238,6 +248,9 @@
     bool repeatLatestBuffer_l();
     int64_t getTimestamp(const BufferItem &item);
 
+    // called when the data space of the input buffer changes
+    void onDataSpaceChanged_l(android_dataspace dataSpace, android_pixel_format pixelFormat);
+
     // Lock, covers all member variables.
     mutable Mutex mMutex;
 
@@ -252,6 +265,9 @@
 
     bool mSuspended;
 
+    // Last dataspace seen
+    android_dataspace mLastDataSpace;
+
     // Our BufferQueue interfaces. mProducer is passed to the producer through
     // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
     // the buffers queued by the producer.
@@ -321,6 +337,7 @@
     int64_t mPrevFrameUs;
 
     MetadataBufferType mMetadataBufferType;
+    ColorAspects mColorAspects;
 
     void onMessageReceived(const sp<AMessage> &msg);
 
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 56b6055..2e989b5 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -31,6 +31,7 @@
 #include <utils/threads.h>
 
 #include "OMXMaster.h"
+#include "OMXUtils.h"
 
 #include <OMX_AsString.h>
 #include <OMX_Component.h>
@@ -38,6 +39,9 @@
 
 namespace android {
 
+// node ids are created by concatenating the pid with a 16-bit counter
+static size_t kMaxNodeInstances = (1 << 16);
+
 ////////////////////////////////////////////////////////////////////////////////
 
 // This provides the underlying Thread used by CallbackDispatcher.
@@ -237,10 +241,19 @@
 }
 
 status_t OMX::allocateNode(
-        const char *name, const sp<IOMXObserver> &observer, node_id *node) {
+        const char *name, const sp<IOMXObserver> &observer,
+        sp<IBinder> *nodeBinder, node_id *node) {
     Mutex::Autolock autoLock(mLock);
 
     *node = 0;
+    if (nodeBinder != NULL) {
+        *nodeBinder = NULL;
+    }
+
+    if (mNodeIDToInstance.size() == kMaxNodeInstances) {
+        // all possible node IDs are in use
+        return NO_MEMORY;
+    }
 
     OMXNodeInstance *instance = new OMXNodeInstance(this, observer, name);
 
@@ -257,7 +270,7 @@
         return StatusFromOMXError(err);
     }
 
-    *node = makeNodeID(instance);
+    *node = makeNodeID_l(instance);
     mDispatchers.add(*node, new CallbackDispatcher(instance));
 
     instance->setHandle(*node, handle);
@@ -271,6 +284,10 @@
 status_t OMX::freeNode(node_id node) {
     OMXNodeInstance *instance = findInstance(node);
 
+    if (instance == NULL) {
+        return OK;
+    }
+
     {
         Mutex::Autolock autoLock(mLock);
         ssize_t index = mLiveNodes.indexOfKey(IInterface::asBinder(instance->observer()));
@@ -298,14 +315,26 @@
 
 status_t OMX::sendCommand(
         node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
-    return findInstance(node)->sendCommand(cmd, param);
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->sendCommand(cmd, param);
 }
 
 status_t OMX::getParameter(
         node_id node, OMX_INDEXTYPE index,
         void *params, size_t size) {
     ALOGV("getParameter(%u %#x %p %zd)", node, index, params, size);
-    return findInstance(node)->getParameter(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->getParameter(
             index, params, size);
 }
 
@@ -313,85 +342,176 @@
         node_id node, OMX_INDEXTYPE index,
         const void *params, size_t size) {
     ALOGV("setParameter(%u %#x %p %zd)", node, index, params, size);
-    return findInstance(node)->setParameter(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->setParameter(
             index, params, size);
 }
 
 status_t OMX::getConfig(
         node_id node, OMX_INDEXTYPE index,
         void *params, size_t size) {
-    return findInstance(node)->getConfig(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->getConfig(
             index, params, size);
 }
 
 status_t OMX::setConfig(
         node_id node, OMX_INDEXTYPE index,
         const void *params, size_t size) {
-    return findInstance(node)->setConfig(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->setConfig(
             index, params, size);
 }
 
 status_t OMX::getState(
         node_id node, OMX_STATETYPE* state) {
-    return findInstance(node)->getState(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->getState(
             state);
 }
 
-status_t OMX::enableGraphicBuffers(
-        node_id node, OMX_U32 port_index, OMX_BOOL enable) {
-    return findInstance(node)->enableGraphicBuffers(port_index, enable);
+status_t OMX::enableNativeBuffers(
+        node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->enableNativeBuffers(port_index, graphic, enable);
 }
 
 status_t OMX::getGraphicBufferUsage(
         node_id node, OMX_U32 port_index, OMX_U32* usage) {
-    return findInstance(node)->getGraphicBufferUsage(port_index, usage);
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->getGraphicBufferUsage(port_index, usage);
 }
 
 status_t OMX::storeMetaDataInBuffers(
         node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
-    return findInstance(node)->storeMetaDataInBuffers(port_index, enable, type);
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->storeMetaDataInBuffers(port_index, enable, type);
 }
 
 status_t OMX::prepareForAdaptivePlayback(
         node_id node, OMX_U32 portIndex, OMX_BOOL enable,
         OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
-    return findInstance(node)->prepareForAdaptivePlayback(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->prepareForAdaptivePlayback(
             portIndex, enable, maxFrameWidth, maxFrameHeight);
 }
 
 status_t OMX::configureVideoTunnelMode(
         node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
         OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
-    return findInstance(node)->configureVideoTunnelMode(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->configureVideoTunnelMode(
             portIndex, tunneled, audioHwSync, sidebandHandle);
 }
 
 status_t OMX::useBuffer(
         node_id node, OMX_U32 port_index, const sp<IMemory> &params,
         buffer_id *buffer, OMX_U32 allottedSize) {
-    return findInstance(node)->useBuffer(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->useBuffer(
             port_index, params, buffer, allottedSize);
 }
 
 status_t OMX::useGraphicBuffer(
         node_id node, OMX_U32 port_index,
         const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
-    return findInstance(node)->useGraphicBuffer(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->useGraphicBuffer(
             port_index, graphicBuffer, buffer);
 }
 
 status_t OMX::updateGraphicBufferInMeta(
         node_id node, OMX_U32 port_index,
         const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
-    return findInstance(node)->updateGraphicBufferInMeta(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->updateGraphicBufferInMeta(
             port_index, graphicBuffer, buffer);
 }
 
-status_t OMX::createInputSurface(
+status_t OMX::updateNativeHandleInMeta(
         node_id node, OMX_U32 port_index,
+        const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->updateNativeHandleInMeta(
+            port_index, nativeHandle, buffer);
+}
+
+status_t OMX::createInputSurface(
+        node_id node, OMX_U32 port_index, android_dataspace dataSpace,
         sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
-    return findInstance(node)->createInputSurface(
-            port_index, bufferProducer, type);
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->createInputSurface(
+            port_index, dataSpace, bufferProducer, type);
 }
 
 status_t OMX::createPersistentInputSurface(
@@ -404,35 +524,71 @@
 status_t OMX::setInputSurface(
         node_id node, OMX_U32 port_index,
         const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
-    return findInstance(node)->setInputSurface(port_index, bufferConsumer, type);
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->setInputSurface(port_index, bufferConsumer, type);
 }
 
 
 status_t OMX::signalEndOfInputStream(node_id node) {
-    return findInstance(node)->signalEndOfInputStream();
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->signalEndOfInputStream();
 }
 
-status_t OMX::allocateBuffer(
+status_t OMX::allocateSecureBuffer(
         node_id node, OMX_U32 port_index, size_t size,
-        buffer_id *buffer, void **buffer_data) {
-    return findInstance(node)->allocateBuffer(
-            port_index, size, buffer, buffer_data);
+        buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->allocateSecureBuffer(
+            port_index, size, buffer, buffer_data, native_handle);
 }
 
 status_t OMX::allocateBufferWithBackup(
         node_id node, OMX_U32 port_index, const sp<IMemory> &params,
         buffer_id *buffer, OMX_U32 allottedSize) {
-    return findInstance(node)->allocateBufferWithBackup(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->allocateBufferWithBackup(
             port_index, params, buffer, allottedSize);
 }
 
 status_t OMX::freeBuffer(node_id node, OMX_U32 port_index, buffer_id buffer) {
-    return findInstance(node)->freeBuffer(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->freeBuffer(
             port_index, buffer);
 }
 
 status_t OMX::fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
-    return findInstance(node)->fillBuffer(buffer, fenceFd);
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->fillBuffer(buffer, fenceFd);
 }
 
 status_t OMX::emptyBuffer(
@@ -440,7 +596,13 @@
         buffer_id buffer,
         OMX_U32 range_offset, OMX_U32 range_length,
         OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
-    return findInstance(node)->emptyBuffer(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->emptyBuffer(
             buffer, range_offset, range_length, flags, timestamp, fenceFd);
 }
 
@@ -448,7 +610,13 @@
         node_id node,
         const char *parameter_name,
         OMX_INDEXTYPE *index) {
-    return findInstance(node)->getExtensionIndex(
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->getExtensionIndex(
             parameter_name, index);
 }
 
@@ -458,7 +626,13 @@
         InternalOptionType type,
         const void *data,
         size_t size) {
-    return findInstance(node)->setInternalOption(port_index, type, data, size);
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->setInternalOption(port_index, type, data, size);
 }
 
 OMX_ERRORTYPE OMX::OnEvent(
@@ -468,9 +642,14 @@
         OMX_IN OMX_U32 nData2,
         OMX_IN OMX_PTR pEventData) {
     ALOGV("OnEvent(%d, %" PRIu32", %" PRIu32 ")", eEvent, nData1, nData2);
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return OMX_ErrorComponentNotFound;
+    }
 
     // Forward to OMXNodeInstance.
-    findInstance(node)->onEvent(eEvent, nData1, nData2);
+    instance->onEvent(eEvent, nData1, nData2);
 
     sp<OMX::CallbackDispatcher> dispatcher = findDispatcher(node);
 
@@ -542,10 +721,17 @@
     return OMX_ErrorNone;
 }
 
-OMX::node_id OMX::makeNodeID(OMXNodeInstance *instance) {
+OMX::node_id OMX::makeNodeID_l(OMXNodeInstance *instance) {
     // mLock is already held.
 
-    node_id node = (node_id)++mNodeCounter;
+    node_id prefix = node_id(getpid() << 16);
+    node_id node = 0;
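+    // Probe successive counter values until an unused ID is found; allocateNode()
+    // guarantees at least one of the 2^16 per-process IDs is free.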
+    do  {
+        if (++mNodeCounter >= kMaxNodeInstances) {
+            mNodeCounter = 0; // OK to use because we're combining with the pid
+        }
+        node = node_id(prefix | mNodeCounter);
+    } while (mNodeIDToInstance.indexOfKey(node) >= 0);
     mNodeIDToInstance.add(node, instance);
 
     return node;
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index ae3cb33..6132a2c 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -23,6 +23,7 @@
 #include "SoftOMXPlugin.h"
 
 #include <dlfcn.h>
+#include <fcntl.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 
@@ -30,6 +31,29 @@
 
 OMXMaster::OMXMaster()
     : mVendorLibHandle(NULL) {
+
+    // Determine this process's name from /proc/<pid>/comm so that
+    // makeComponentInstance() can log which process is allocating codecs.
+    pid_t pid = getpid();
+    char filename[20];
+    snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);
+    int fd = open(filename, O_RDONLY);
+    if (fd < 0) {
+        ALOGW("couldn't determine process name");
+        sprintf(mProcessName, "<unknown>");
+    } else {
+        ssize_t len = read(fd, mProcessName, sizeof(mProcessName));
+        if (len < 2) {
+            ALOGW("couldn't determine process name");
+            sprintf(mProcessName, "<unknown>");
+        } else {
+            // the name is newline terminated, so erase the newline
+            mProcessName[len - 1] = 0;
+        }
+        close(fd);
+    }
+
     addVendorPlugin();
     addPlugin(new SoftOMXPlugin);
 }
@@ -123,6 +147,7 @@
         const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData,
         OMX_COMPONENTTYPE **component) {
+    ALOGI("makeComponentInstance(%s) in %s process", name, mProcessName);
     Mutex::Autolock autoLock(mLock);
 
     *component = NULL;
diff --git a/media/libstagefright/omx/OMXMaster.h b/media/libstagefright/omx/OMXMaster.h
index 6069741..3f9c0ca 100644
--- a/media/libstagefright/omx/OMXMaster.h
+++ b/media/libstagefright/omx/OMXMaster.h
@@ -50,6 +50,7 @@
             Vector<String8> *roles);
 
 private:
+    char mProcessName[16];
     Mutex mLock;
     List<OMXPluginBase *> mPlugins;
     KeyedVector<String8, OMXPluginBase *> mPluginByComponentName;
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 7f534b5e..4f1a952 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -22,6 +22,7 @@
 
 #include "../include/OMXNodeInstance.h"
 #include "OMXMaster.h"
+#include "OMXUtils.h"
 #include "GraphicBufferSource.h"
 
 #include <OMX_Component.h>
@@ -29,13 +30,14 @@
 #include <OMX_AsString.h>
 
 #include <binder/IMemory.h>
+#include <cutils/properties.h>
 #include <gui/BufferQueue.h>
 #include <HardwareAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/MediaErrors.h>
-
 #include <utils/misc.h>
+#include <utils/NativeHandle.h>
 
 static const OMX_U32 kPortIndexInput = 0;
 static const OMX_U32 kPortIndexOutput = 1;
@@ -88,16 +90,6 @@
 // TRICKY: this is needed so formatting macros expand before substitution
 #define WITH_STATS(fmt, ...) WITH_STATS_WRAPPER(fmt, ##__VA_ARGS__)
 
-template<class T>
-static void InitOMXParams(T *params) {
-    memset(params, 0, sizeof(T));
-    params->nSize = sizeof(T);
-    params->nVersion.s.nVersionMajor = 1;
-    params->nVersion.s.nVersionMinor = 0;
-    params->nVersion.s.nRevision = 0;
-    params->nVersion.s.nStep = 0;
-}
-
 namespace android {
 
 struct BufferMeta {
@@ -163,12 +155,17 @@
         mGraphicBuffer = graphicBuffer;
     }
 
+    void setNativeHandle(const sp<NativeHandle> &nativeHandle) {
+        mNativeHandle = nativeHandle;
+    }
+
     OMX_U32 getPortIndex() {
         return mPortIndex;
     }
 
 private:
     sp<GraphicBuffer> mGraphicBuffer;
+    sp<NativeHandle> mNativeHandle;
     sp<IMemory> mMem;
     size_t mSize;
     bool mIsBackup;
@@ -211,6 +208,8 @@
     mDebugLevelBumpPendingBuffers[1] = 0;
     mMetadataType[0] = kMetadataBufferTypeInvalid;
     mMetadataType[1] = kMetadataBufferTypeInvalid;
+    mSecureBufferType[0] = kSecureBufferTypeUnknown;
+    mSecureBufferType[1] = kSecureBufferTypeUnknown;
     mIsSecure = AString(name).endsWith(".secure");
 }
 
@@ -250,20 +249,6 @@
     return mNodeID;
 }
 
-status_t StatusFromOMXError(OMX_ERRORTYPE err) {
-    switch (err) {
-        case OMX_ErrorNone:
-            return OK;
-        case OMX_ErrorUnsupportedSetting:
-        case OMX_ErrorUnsupportedIndex:
-            return ERROR_UNSUPPORTED;
-        case OMX_ErrorInsufficientResources:
-            return NO_MEMORY;
-        default:
-            return UNKNOWN_ERROR;
-    }
-}
-
 status_t OMXNodeInstance::freeNode(OMXMaster *master) {
     CLOG_LIFE(freeNode, "handle=%p", mHandle);
     static int32_t kMaxNumIterations = 10;
@@ -461,29 +446,52 @@
     return StatusFromOMXError(err);
 }
 
-status_t OMXNodeInstance::enableGraphicBuffers(
-        OMX_U32 portIndex, OMX_BOOL enable) {
+status_t OMXNodeInstance::enableNativeBuffers(
+        OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable) {
     Mutex::Autolock autoLock(mLock);
-    CLOG_CONFIG(enableGraphicBuffers, "%s:%u, %d", portString(portIndex), portIndex, enable);
+    CLOG_CONFIG(enableNativeBuffers, "%s:%u%s, %d", portString(portIndex), portIndex,
+                graphic ? ", graphic" : "", enable);
     OMX_STRING name = const_cast<OMX_STRING>(
-            "OMX.google.android.index.enableAndroidNativeBuffers");
+            graphic ? "OMX.google.android.index.enableAndroidNativeBuffers"
+                    : "OMX.google.android.index.allocateNativeHandle");
 
     OMX_INDEXTYPE index;
     OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index);
 
-    if (err != OMX_ErrorNone) {
+    if (err == OMX_ErrorNone) {
+        EnableAndroidNativeBuffersParams params;
+        InitOMXParams(&params);
+        params.nPortIndex = portIndex;
+        params.enable = enable;
+
+        err = OMX_SetParameter(mHandle, index, &params);
+        CLOG_IF_ERROR(setParameter, err, "%s(%#x): %s:%u en=%d", name, index,
+                      portString(portIndex), portIndex, enable);
+        if (!graphic) {
+            if (err == OMX_ErrorNone) {
+                mSecureBufferType[portIndex] =
+                    enable ? kSecureBufferTypeNativeHandle : kSecureBufferTypeOpaque;
+            } else if (mSecureBufferType[portIndex] == kSecureBufferTypeUnknown) {
+                mSecureBufferType[portIndex] = kSecureBufferTypeOpaque;
+            }
+        }
+    } else {
         CLOG_ERROR_IF(enable, getExtensionIndex, err, "%s", name);
-        return StatusFromOMXError(err);
+        if (!graphic) {
+            // Extension not supported, check for manual override with system property
+            // This is a temporary workaround until partners support the OMX extension
+            char value[PROPERTY_VALUE_MAX];
+            if (property_get("media.mediadrmservice.enable", value, NULL)
+                && (!strcmp("1", value) || !strcasecmp("true", value))) {
+                CLOG_CONFIG(enableNativeBuffers, "system property override: using native-handles");
+                mSecureBufferType[portIndex] = kSecureBufferTypeNativeHandle;
+            } else if (mSecureBufferType[portIndex] == kSecureBufferTypeUnknown) {
+                mSecureBufferType[portIndex] = kSecureBufferTypeOpaque;
+            }
+            err = OMX_ErrorNone;
+        }
     }
 
-    EnableAndroidNativeBuffersParams params;
-    InitOMXParams(&params);
-    params.nPortIndex = portIndex;
-    params.enable = enable;
-
-    err = OMX_SetParameter(mHandle, index, &params);
-    CLOG_IF_ERROR(setParameter, err, "%s(%#x): %s:%u en=%d", name, index,
-            portString(portIndex), portIndex, enable);
     return StatusFromOMXError(err);
 }
 
@@ -528,6 +536,9 @@
         OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
     if (portIndex != kPortIndexInput && portIndex != kPortIndexOutput) {
         android_errorWriteLog(0x534e4554, "26324358");
+        if (type != NULL) {
+            *type = kMetadataBufferTypeInvalid;
+        }
         return BAD_VALUE;
     }
 
@@ -538,26 +549,32 @@
     OMX_STRING nativeBufferName = const_cast<OMX_STRING>(
             "OMX.google.android.index.storeANWBufferInMetadata");
     MetadataBufferType negotiatedType;
+    MetadataBufferType requestedType = type != NULL ? *type : kMetadataBufferTypeANWBuffer;
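+    // If the caller did not request a specific metadata type, default to
+    // ANWBuffer and probe the storeANWBufferInMetadata extension first.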
 
     StoreMetaDataInBuffersParams params;
     InitOMXParams(&params);
     params.nPortIndex = portIndex;
     params.bStoreMetaData = enable;
 
-    OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, nativeBufferName, &index);
+    OMX_ERRORTYPE err =
+        requestedType == kMetadataBufferTypeANWBuffer
+                ? OMX_GetExtensionIndex(mHandle, nativeBufferName, &index)
+                : OMX_ErrorUnsupportedIndex;
     OMX_ERRORTYPE xerr = err;
     if (err == OMX_ErrorNone) {
         err = OMX_SetParameter(mHandle, index, &params);
         if (err == OMX_ErrorNone) {
             name = nativeBufferName; // set name for debugging
-            negotiatedType = kMetadataBufferTypeANWBuffer;
+            negotiatedType = requestedType;
         }
     }
     if (err != OMX_ErrorNone) {
         err = OMX_GetExtensionIndex(mHandle, name, &index);
         xerr = err;
         if (err == OMX_ErrorNone) {
-            negotiatedType = kMetadataBufferTypeGrallocSource;
+            negotiatedType =
+                requestedType == kMetadataBufferTypeANWBuffer
+                        ? kMetadataBufferTypeGrallocSource : requestedType;
             err = OMX_SetParameter(mHandle, index, &params);
         }
     }
@@ -579,8 +596,9 @@
         }
         mMetadataType[portIndex] = negotiatedType;
     }
-    CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u negotiated %s:%d",
-            portString(portIndex), portIndex, asString(negotiatedType), negotiatedType);
+    CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u %srequested %s:%d negotiated %s:%d",
+            portString(portIndex), portIndex, enable ? "" : "UN",
+            asString(requestedType), requestedType, asString(negotiatedType), negotiatedType);
 
     if (type != NULL) {
         *type = negotiatedType;
@@ -664,6 +682,11 @@
 status_t OMXNodeInstance::useBuffer(
         OMX_U32 portIndex, const sp<IMemory> &params,
         OMX::buffer_id *buffer, OMX_U32 allottedSize) {
+    if (params == NULL || buffer == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock autoLock(mLock);
     if (allottedSize > params->size()) {
         return BAD_VALUE;
@@ -708,6 +731,10 @@
 status_t OMXNodeInstance::useGraphicBuffer2_l(
         OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
         OMX::buffer_id *buffer) {
+    if (graphicBuffer == NULL || buffer == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
 
     // port definition
     OMX_PARAM_PORTDEFINITIONTYPE def;
@@ -760,6 +787,10 @@
 status_t OMXNodeInstance::useGraphicBuffer(
         OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
         OMX::buffer_id *buffer) {
+    if (graphicBuffer == NULL || buffer == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
     Mutex::Autolock autoLock(mLock);
 
     // See if the newer version of the extension is present.
@@ -819,29 +850,39 @@
 
 status_t OMXNodeInstance::updateGraphicBufferInMeta_l(
         OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
-        OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header) {
+        OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header, bool updateCodecBuffer) {
+    // No need to check |graphicBuffer| since NULL is valid for it as below.
     if (header == NULL) {
+        ALOGE("b/25884056");
         return BAD_VALUE;
     }
+
     if (portIndex != kPortIndexInput && portIndex != kPortIndexOutput) {
         return BAD_VALUE;
     }
 
     BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
+    sp<ABuffer> data = bufferMeta->getBuffer(
+            header, !updateCodecBuffer /* backup */, false /* limit */);
     bufferMeta->setGraphicBuffer(graphicBuffer);
-    if (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource
-            && header->nAllocLen >= sizeof(VideoGrallocMetadata)) {
-        VideoGrallocMetadata &metadata = *(VideoGrallocMetadata *)(header->pBuffer);
+    MetadataBufferType metaType = mMetadataType[portIndex];
+    // we use gralloc source only in the codec buffers
+    if (metaType == kMetadataBufferTypeGrallocSource && !updateCodecBuffer) {
+        metaType = kMetadataBufferTypeANWBuffer;
+    }
+    if (metaType == kMetadataBufferTypeGrallocSource
+            && data->capacity() >= sizeof(VideoGrallocMetadata)) {
+        VideoGrallocMetadata &metadata = *(VideoGrallocMetadata *)(data->data());
         metadata.eType = kMetadataBufferTypeGrallocSource;
         metadata.pHandle = graphicBuffer == NULL ? NULL : graphicBuffer->handle;
-    } else if (mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer
-            && header->nAllocLen >= sizeof(VideoNativeMetadata)) {
-        VideoNativeMetadata &metadata = *(VideoNativeMetadata *)(header->pBuffer);
+    } else if (metaType == kMetadataBufferTypeANWBuffer
+            && data->capacity() >= sizeof(VideoNativeMetadata)) {
+        VideoNativeMetadata &metadata = *(VideoNativeMetadata *)(data->data());
         metadata.eType = kMetadataBufferTypeANWBuffer;
         metadata.pBuffer = graphicBuffer == NULL ? NULL : graphicBuffer->getNativeBuffer();
         metadata.nFenceFd = -1;
     } else {
-        CLOG_BUFFER(updateGraphicBufferInMeta, "%s:%u, %#x bad type (%d) or size (%u)",
+        CLOG_ERROR(updateGraphicBufferInMeta, BAD_VALUE, "%s:%u, %#x bad type (%d) or size (%u)",
             portString(portIndex), portIndex, buffer, mMetadataType[portIndex], header->nAllocLen);
         return BAD_VALUE;
     }
@@ -857,7 +898,47 @@
         OMX::buffer_id buffer) {
     Mutex::Autolock autoLock(mLock);
     OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
-    return updateGraphicBufferInMeta_l(portIndex, graphicBuffer, buffer, header);
+    // update backup buffer for input, codec buffer for output
+    return updateGraphicBufferInMeta_l(
+            portIndex, graphicBuffer, buffer, header,
+            portIndex == kPortIndexOutput /* updateCodecBuffer */);
+}
+
+status_t OMXNodeInstance::updateNativeHandleInMeta(
+        OMX_U32 portIndex, const sp<NativeHandle>& nativeHandle, OMX::buffer_id buffer) {
+    Mutex::Autolock autoLock(mLock);
+    OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
+    // No need to check |nativeHandle|: NULL is a valid value for it, as handled below.
+    if (header == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
+
+    if (portIndex != kPortIndexInput && portIndex != kPortIndexOutput) {
+        return BAD_VALUE;
+    }
+
+    BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
+    // update backup buffer for input, codec buffer for output
+    sp<ABuffer> data = bufferMeta->getBuffer(
+            header, portIndex == kPortIndexInput /* backup */, false /* limit */);
+    bufferMeta->setNativeHandle(nativeHandle);
+    if (mMetadataType[portIndex] == kMetadataBufferTypeNativeHandleSource
+            && data->capacity() >= sizeof(VideoNativeHandleMetadata)) {
+        VideoNativeHandleMetadata &metadata = *(VideoNativeHandleMetadata *)(data->data());
+        metadata.eType = mMetadataType[portIndex];
+        metadata.pHandle =
+            nativeHandle == NULL ? NULL : const_cast<native_handle*>(nativeHandle->handle());
+    } else {
+        CLOG_ERROR(updateNativeHandleInMeta, BAD_VALUE, "%s:%u, %#x bad type (%d) or size (%zu)",
+            portString(portIndex), portIndex, buffer, mMetadataType[portIndex], data->capacity());
+        return BAD_VALUE;
+    }
+
+    CLOG_BUFFER(updateNativeHandleInMeta, "%s:%u, %#x := %p",
+            portString(portIndex), portIndex, buffer,
+            nativeHandle == NULL ? NULL : nativeHandle->handle());
+    return OK;
 }
 
 status_t OMXNodeInstance::createGraphicBufferSource(
@@ -873,6 +954,9 @@
     }
 
     // Input buffers will hold meta-data (ANativeWindowBuffer references).
+    if (type != NULL) {
+        *type = kMetadataBufferTypeANWBuffer;
+    }
     err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE, type);
     if (err != OK) {
         return err;
@@ -922,7 +1006,13 @@
 }
 
 status_t OMXNodeInstance::createInputSurface(
-        OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
+        OMX_U32 portIndex, android_dataspace dataSpace,
+        sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
+    if (bufferProducer == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock autolock(mLock);
     status_t err = createGraphicBufferSource(portIndex, NULL /* bufferConsumer */, type);
 
@@ -930,6 +1020,8 @@
         return err;
     }
 
+    mGraphicBufferSource->setDefaultDataSpace(dataSpace);
+
     *bufferProducer = mGraphicBufferSource->getIGraphicBufferProducer();
     return OK;
 }
@@ -938,6 +1030,10 @@
 status_t OMXNodeInstance::createPersistentInputSurface(
         sp<IGraphicBufferProducer> *bufferProducer,
         sp<IGraphicBufferConsumer> *bufferConsumer) {
+    if (bufferProducer == NULL || bufferConsumer == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
     String8 name("GraphicBufferSource");
 
     sp<IGraphicBufferProducer> producer;
@@ -968,6 +1064,10 @@
     return createGraphicBufferSource(portIndex, bufferConsumer, type);
 }
 
+void OMXNodeInstance::signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) {
+    mOwner->OnEvent(mNodeID, event, arg1, arg2, NULL);
+}
+
 status_t OMXNodeInstance::signalEndOfInputStream() {
     // For non-Surface input, the MediaCodec should convert the call to a
     // pair of requests (dequeue input buffer, queue input buffer with EOS
@@ -980,9 +1080,14 @@
     return bufferSource->signalEndOfInputStream();
 }
 
-status_t OMXNodeInstance::allocateBuffer(
+status_t OMXNodeInstance::allocateSecureBuffer(
         OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
-        void **buffer_data) {
+        void **buffer_data, sp<NativeHandle> *native_handle) {
+    if (buffer == NULL || buffer_data == NULL || native_handle == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock autoLock(mLock);
 
     BufferMeta *buffer_meta = new BufferMeta(size, portIndex);
@@ -1005,7 +1110,14 @@
     CHECK_EQ(header->pAppPrivate, buffer_meta);
 
     *buffer = makeBufferID(header);
-    *buffer_data = header->pBuffer;
+    if (mSecureBufferType[portIndex] == kSecureBufferTypeNativeHandle) {
+        *buffer_data = NULL;
+        *native_handle = NativeHandle::create(
+                (native_handle_t *)header->pBuffer, false /* ownsHandle */);
+    } else {
+        *buffer_data = header->pBuffer;
+        *native_handle = NULL;
+    }
 
     addActiveBuffer(portIndex, *buffer);
 
@@ -1013,7 +1125,9 @@
     if (bufferSource != NULL && portIndex == kPortIndexInput) {
         bufferSource->addCodecBuffer(header);
     }
-    CLOG_BUFFER(allocateBuffer, NEW_BUFFER_FMT(*buffer, portIndex, "%zu@%p", size, *buffer_data));
+    CLOG_BUFFER(allocateSecureBuffer, NEW_BUFFER_FMT(
+            *buffer, portIndex, "%zu@%p:%p", size, *buffer_data,
+            *native_handle == NULL ? NULL : (*native_handle)->handle()));
 
     return OK;
 }
@@ -1021,6 +1135,11 @@
 status_t OMXNodeInstance::allocateBufferWithBackup(
         OMX_U32 portIndex, const sp<IMemory> &params,
         OMX::buffer_id *buffer, OMX_U32 allottedSize) {
+    if (params == NULL || buffer == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock autoLock(mLock);
     if (allottedSize > params->size()) {
         return BAD_VALUE;
@@ -1069,6 +1188,7 @@
 
     OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
     if (header == NULL) {
+        ALOGE("b/25884056");
         return BAD_VALUE;
     }
     BufferMeta *buffer_meta = static_cast<BufferMeta *>(header->pAppPrivate);
@@ -1088,6 +1208,7 @@
 
     OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexOutput);
     if (header == NULL) {
+        ALOGE("b/25884056");
         return BAD_VALUE;
     }
     header->nFilledLen = 0;
@@ -1124,6 +1245,7 @@
 
     OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexInput);
     if (header == NULL) {
+        ALOGE("b/25884056");
         return BAD_VALUE;
     }
     BufferMeta *buffer_meta =
@@ -1277,9 +1399,16 @@
 status_t OMXNodeInstance::emptyGraphicBuffer(
         OMX_BUFFERHEADERTYPE *header, const sp<GraphicBuffer> &graphicBuffer,
         OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+    if (header == NULL) {
+        ALOGE("b/25884056");
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock autoLock(mLock);
     OMX::buffer_id buffer = findBufferID(header);
-    status_t err = updateGraphicBufferInMeta_l(kPortIndexInput, graphicBuffer, buffer, header);
+    status_t err = updateGraphicBufferInMeta_l(
+            kPortIndexInput, graphicBuffer, buffer, header,
+            true /* updateCodecBuffer */);
     if (err != OK) {
         CLOG_ERROR(emptyGraphicBuffer, err, FULL_BUFFER(
                 (intptr_t)header->pBuffer, header, fenceFd));
@@ -1287,7 +1416,13 @@
     }
 
     header->nOffset = 0;
-    header->nFilledLen = graphicBuffer == NULL ? 0 : header->nAllocLen;
+    if (graphicBuffer == NULL) {
+        header->nFilledLen = 0;
+    } else if (mMetadataType[kPortIndexInput] == kMetadataBufferTypeGrallocSource) {
+        header->nFilledLen = sizeof(VideoGrallocMetadata);
+    } else {
+        header->nFilledLen = sizeof(VideoNativeMetadata);
+    }
     return emptyBuffer_l(header, flags, timestamp, (intptr_t)header->pBuffer, fenceFd);
 }
 
@@ -1314,6 +1449,16 @@
     }
 }
 
+template<typename T>
+static bool getInternalOption(
+        const void *data, size_t size, T *out) {
+    if (size != sizeof(T)) {
+        return false;
+    }
+    *out = *(T*)data;
+    return true;
+}
+
 status_t OMXNodeInstance::setInternalOption(
         OMX_U32 portIndex,
         IOMX::InternalOptionType type,
@@ -1328,6 +1473,7 @@
         case IOMX::INTERNAL_OPTION_MAX_FPS:
         case IOMX::INTERNAL_OPTION_START_TIME:
         case IOMX::INTERNAL_OPTION_TIME_LAPSE:
+        case IOMX::INTERNAL_OPTION_COLOR_ASPECTS:
         {
             const sp<GraphicBufferSource> &bufferSource =
                 getGraphicBufferSource();
@@ -1338,58 +1484,63 @@
             }
 
             if (type == IOMX::INTERNAL_OPTION_SUSPEND) {
-                if (size != sizeof(bool)) {
+                bool suspend;
+                if (!getInternalOption(data, size, &suspend)) {
                     return INVALID_OPERATION;
                 }
 
-                bool suspend = *(bool *)data;
                 CLOG_CONFIG(setInternalOption, "suspend=%d", suspend);
                 bufferSource->suspend(suspend);
-            } else if (type ==
-                    IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY){
-                if (size != sizeof(int64_t)) {
+            } else if (type == IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY) {
+                int64_t delayUs;
+                if (!getInternalOption(data, size, &delayUs)) {
                     return INVALID_OPERATION;
                 }
 
-                int64_t delayUs = *(int64_t *)data;
                 CLOG_CONFIG(setInternalOption, "delayUs=%lld", (long long)delayUs);
                 return bufferSource->setRepeatPreviousFrameDelayUs(delayUs);
-            } else if (type ==
-                    IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP){
-                if (size != sizeof(int64_t)) {
+            } else if (type == IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP) {
+                int64_t maxGapUs;
+                if (!getInternalOption(data, size, &maxGapUs)) {
                     return INVALID_OPERATION;
                 }
 
-                int64_t maxGapUs = *(int64_t *)data;
                 CLOG_CONFIG(setInternalOption, "gapUs=%lld", (long long)maxGapUs);
                 return bufferSource->setMaxTimestampGapUs(maxGapUs);
             } else if (type == IOMX::INTERNAL_OPTION_MAX_FPS) {
-                if (size != sizeof(float)) {
+                float maxFps;
+                if (!getInternalOption(data, size, &maxFps)) {
                     return INVALID_OPERATION;
                 }
 
-                float maxFps = *(float *)data;
                 CLOG_CONFIG(setInternalOption, "maxFps=%f", maxFps);
                 return bufferSource->setMaxFps(maxFps);
             } else if (type == IOMX::INTERNAL_OPTION_START_TIME) {
-                if (size != sizeof(int64_t)) {
+                int64_t skipFramesBeforeUs;
+                if (!getInternalOption(data, size, &skipFramesBeforeUs)) {
                     return INVALID_OPERATION;
                 }
 
-                int64_t skipFramesBeforeUs = *(int64_t *)data;
                 CLOG_CONFIG(setInternalOption, "beforeUs=%lld", (long long)skipFramesBeforeUs);
                 bufferSource->setSkipFramesBeforeUs(skipFramesBeforeUs);
-            } else { // IOMX::INTERNAL_OPTION_TIME_LAPSE
-                if (size != sizeof(int64_t) * 2) {
+            } else if (type == IOMX::INTERNAL_OPTION_TIME_LAPSE) {
+                GraphicBufferSource::TimeLapseConfig config;
+                if (!getInternalOption(data, size, &config)) {
                     return INVALID_OPERATION;
                 }
 
-                int64_t timePerFrameUs = ((int64_t *)data)[0];
-                int64_t timePerCaptureUs = ((int64_t *)data)[1];
                 CLOG_CONFIG(setInternalOption, "perFrameUs=%lld perCaptureUs=%lld",
-                        (long long)timePerFrameUs, (long long)timePerCaptureUs);
+                        (long long)config.mTimePerFrameUs, (long long)config.mTimePerCaptureUs);
 
-                bufferSource->setTimeLapseUs((int64_t *)data);
+                return bufferSource->setTimeLapseConfig(config);
+            } else if (type == IOMX::INTERNAL_OPTION_COLOR_ASPECTS) {
+                ColorAspects aspects;
+                if (!getInternalOption(data, size, &aspects)) {
+                    return INVALID_OPERATION;
+                }
+
+                CLOG_CONFIG(setInternalOption, "setting color aspects");
+                bufferSource->setColorAspects(aspects);
             }
 
             return OK;
@@ -1407,6 +1558,7 @@
         OMX_BUFFERHEADERTYPE *buffer =
             findBufferHeader(msg.u.extended_buffer_data.buffer, kPortIndexOutput);
         if (buffer == NULL) {
+            ALOGE("b/25884056");
             return false;
         }
 
@@ -1550,6 +1702,10 @@
         OMX_IN OMX_U32 nData1,
         OMX_IN OMX_U32 nData2,
         OMX_IN OMX_PTR pEventData) {
+    if (pAppData == NULL) {
+        ALOGE("b/25884056");
+        return OMX_ErrorBadParameter;
+    }
     OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
     if (instance->mDying) {
         return OMX_ErrorNone;
@@ -1563,6 +1719,10 @@
         OMX_IN OMX_HANDLETYPE /* hComponent */,
         OMX_IN OMX_PTR pAppData,
         OMX_IN OMX_BUFFERHEADERTYPE* pBuffer) {
+    if (pAppData == NULL) {
+        ALOGE("b/25884056");
+        return OMX_ErrorBadParameter;
+    }
     OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
     if (instance->mDying) {
         return OMX_ErrorNone;
@@ -1577,6 +1737,10 @@
         OMX_IN OMX_HANDLETYPE /* hComponent */,
         OMX_IN OMX_PTR pAppData,
         OMX_IN OMX_BUFFERHEADERTYPE* pBuffer) {
+    if (pAppData == NULL) {
+        ALOGE("b/25884056");
+        return OMX_ErrorBadParameter;
+    }
     OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
     if (instance->mDying) {
         return OMX_ErrorNone;
@@ -1617,7 +1781,8 @@
 void OMXNodeInstance::freeActiveBuffers() {
     // Make sure to count down here, as freeBuffer will in turn remove
     // the active buffer from the vector...
-    for (size_t i = mActiveBuffers.size(); i--;) {
+    for (size_t i = mActiveBuffers.size(); i > 0;) {
+        i--;
         freeBuffer(mActiveBuffers[i].mPortIndex, mActiveBuffers[i].mID);
     }
 }
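
For context, the option-reading helper added above collapses the repeated
"check size, then cast" boilerplate into one typed, size-checked read. A
minimal self-contained sketch of the same pattern (names here are
illustrative, not part of the patch):

    #include <cstddef>
    #include <cstdint>

    template<typename T>
    static bool getTypedOption(const void *data, size_t size, T *out) {
        if (size != sizeof(T)) {
            return false;  // reject a payload whose size does not match T
        }
        *out = *(const T *)data;
        return true;
    }

    // Usage: decode an int64 delay passed as an opaque (pointer, size) blob.
    static bool readDelayUs(const void *data, size_t size, int64_t *delayUs) {
        return getTypedOption(data, size, delayUs);
    }
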
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
new file mode 100644
index 0000000..799696c
--- /dev/null
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "OMXUtils"
+
+#include <string.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/MediaErrors.h>
+#include "OMXUtils.h"
+
+namespace android {
+
+status_t StatusFromOMXError(OMX_ERRORTYPE err) {
+    switch (err) {
+        case OMX_ErrorNone:
+            return OK;
+        case OMX_ErrorUnsupportedSetting:
+        case OMX_ErrorUnsupportedIndex:
+            return ERROR_UNSUPPORTED; // this is a media-specific error
+        case OMX_ErrorInsufficientResources:
+            return NO_MEMORY;
+        case OMX_ErrorInvalidComponentName:
+        case OMX_ErrorComponentNotFound:
+            return NAME_NOT_FOUND;
+        default:
+            return UNKNOWN_ERROR;
+    }
+}
+
+/**************************************************************************************************/
+
+DescribeColorFormatParams::DescribeColorFormatParams(const DescribeColorFormat2Params &params) {
+    InitOMXParams(this);
+
+    eColorFormat = params.eColorFormat;
+    nFrameWidth = params.nFrameWidth;
+    nFrameHeight = params.nFrameHeight;
+    nStride = params.nStride;
+    nSliceHeight = params.nSliceHeight;
+    bUsingNativeBuffers = params.bUsingNativeBuffers;
+    // the media image is not copied, as this conversion is only used before the query
+}
+
+void DescribeColorFormat2Params::initFromV1(const DescribeColorFormatParams &params) {
+    InitOMXParams(this);
+
+    eColorFormat = params.eColorFormat;
+    nFrameWidth = params.nFrameWidth;
+    nFrameHeight = params.nFrameHeight;
+    nStride = params.nStride;
+    nSliceHeight = params.nSliceHeight;
+    bUsingNativeBuffers = params.bUsingNativeBuffers;
+    sMediaImage.initFromV1(params.sMediaImage);
+}
+
+void MediaImage2::initFromV1(const MediaImage &image) {
+    memset(this, 0, sizeof(*this));
+
+    if (image.mType != MediaImage::MEDIA_IMAGE_TYPE_YUV) {
+        mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+        return;
+    }
+
+    for (size_t ix = 0; ix < image.mNumPlanes; ++ix) {
+        if (image.mPlane[ix].mHorizSubsampling > INT32_MAX
+                || image.mPlane[ix].mVertSubsampling > INT32_MAX) {
+            mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+            return;
+        }
+    }
+
+    mType = (MediaImage2::Type)image.mType;
+    mNumPlanes = image.mNumPlanes;
+    mWidth = image.mWidth;
+    mHeight = image.mHeight;
+    mBitDepth = image.mBitDepth;
+    mBitDepthAllocated = 8;
+    for (size_t ix = 0; ix < image.mNumPlanes; ++ix) {
+        mPlane[ix].mOffset = image.mPlane[ix].mOffset;
+        mPlane[ix].mColInc = image.mPlane[ix].mColInc;
+        mPlane[ix].mRowInc = image.mPlane[ix].mRowInc;
+        mPlane[ix].mHorizSubsampling = (int32_t)image.mPlane[ix].mHorizSubsampling;
+        mPlane[ix].mVertSubsampling = (int32_t)image.mPlane[ix].mVertSubsampling;
+    }
+}
+
+/**************************************************************************************************/
+
+}  // namespace android
+
diff --git a/media/libstagefright/omx/OMXUtils.h b/media/libstagefright/omx/OMXUtils.h
new file mode 100644
index 0000000..0c5e537
--- /dev/null
+++ b/media/libstagefright/omx/OMXUtils.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OMX_UTILS_H_
+#define OMX_UTILS_H_
+
+/***** DO NOT USE THIS INCLUDE!!! INTERNAL ONLY!!! UNLESS YOU RESIDE IN media/libstagefright *****/
+
+// OMXUtils contains OMX-specific utility functions for the stagefright/omx library
+// TODO: move ACodec and OMXClient into this library
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+    memset(params, 0, sizeof(T));
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
+status_t StatusFromOMXError(OMX_ERRORTYPE err);
+
+}  // namespace android
+
+#endif
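
As a usage sketch, InitOMXParams() zeroes an OMX parameter struct and stamps
the nSize/nVersion header that components validate before reading it. A
minimal example under that convention (the helper name and error handling
are illustrative):

    #include <OMX_Component.h>
    #include "OMXUtils.h"

    namespace android {

    // Query a port definition, preparing the struct the standard OMX way.
    OMX_ERRORTYPE getPortDefinition(
            OMX_HANDLETYPE handle, OMX_U32 portIndex,
            OMX_PARAM_PORTDEFINITIONTYPE *def) {
        InitOMXParams(def);           // zero the struct, set nSize/nVersion
        def->nPortIndex = portIndex;  // input field: which port to query
        return OMX_GetParameter(handle, OMX_IndexParamPortDefinition, def);
    }

    }  // namespace android
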
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 19dde83..d3553bd 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -414,6 +414,14 @@
             uint32_t oldHeight = def->format.video.nFrameHeight;
             uint32_t newWidth = video_def->nFrameWidth;
             uint32_t newHeight = video_def->nFrameHeight;
+            // We need width, height, stride and slice-height to be non-zero and sensible.
+            // These values were chosen to prevent integer overflows further down the line, and do
+            // not indicate support for 32kx32k video.
+            if (newWidth > 32768 || newHeight > 32768
+                    || video_def->nStride > 32768 || video_def->nSliceHeight > 32768) {
+                ALOGE("b/22885421");
+                return OMX_ErrorBadParameter;
+            }
             if (newWidth != oldWidth || newHeight != oldHeight) {
                 bool outputPort = (newParams->nPortIndex == kOutputPortIndex);
                 if (outputPort) {
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index 5239bc8..0f9c118 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -23,7 +23,6 @@
 
 #include "include/SoftVideoEncoderOMXComponent.h"
 
-#include <hardware/gralloc.h>
 #include <media/hardware/HardwareAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
@@ -71,7 +70,6 @@
       mBitrate(192000),
       mFramerate(30 << 16), // Q16 format
       mColorFormat(OMX_COLOR_FormatYUV420Planar),
-      mGrallocModule(NULL),
       mMinOutputBufferSize(384), // arbitrary, using one uncompressed macroblock
       mMinCompressionRatio(1),   // max output size is normally the input size
       mComponentRole(componentRole),
@@ -383,6 +381,7 @@
 }
 
 // static
+__attribute__((no_sanitize("integer")))
 void SoftVideoEncoderOMXComponent::ConvertFlexYUVToPlanar(
         uint8_t *dst, size_t dstStride, size_t dstVStride,
         struct android_ycbcr *ycbcr, int32_t width, int32_t height) {
@@ -425,6 +424,7 @@
 }
 
 // static
+__attribute__((no_sanitize("integer")))
 void SoftVideoEncoderOMXComponent::ConvertYUV420SemiPlanarToYUV420Planar(
         const uint8_t *inYVU, uint8_t* outYUV, int32_t width, int32_t height) {
     // TODO: add support for stride
@@ -455,6 +455,7 @@
 }
 
 // static
+__attribute__((no_sanitize("integer")))
 void SoftVideoEncoderOMXComponent::ConvertRGB32ToPlanar(
         uint8_t *dstY, size_t dstStride, size_t dstVStride,
         const uint8_t *src, size_t width, size_t height, size_t srcStride,
@@ -522,13 +523,6 @@
         return NULL;
     }
 
-    if (mGrallocModule == NULL) {
-        CHECK_EQ(0, hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &mGrallocModule));
-    }
-
-    const gralloc_module_t *grmodule =
-        (const gralloc_module_t *)mGrallocModule;
-
     buffer_handle_t handle;
     int format;
     size_t srcStride;
@@ -586,19 +580,21 @@
         return NULL;
     }
 
+    auto& mapper = GraphicBufferMapper::get();
+
     void *bits = NULL;
     struct android_ycbcr ycbcr;
     status_t res;
     if (format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
-        res = grmodule->lock_ycbcr(
-                 grmodule, handle,
+        res = mapper.lockYCbCr(
+                 handle,
                  GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_NEVER,
-                 0, 0, width, height, &ycbcr);
+                 Rect(width, height), &ycbcr);
     } else {
-        res = grmodule->lock(
-                 grmodule, handle,
+        res = mapper.lock(
+                 handle,
                  GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_NEVER,
-                 0, 0, width, height, &bits);
+                 Rect(width, height), &bits);
     }
     if (res != OK) {
         ALOGE("Unable to lock image buffer %p for access", handle);
@@ -629,6 +625,7 @@
         case HAL_PIXEL_FORMAT_YCbCr_420_888:
             ConvertFlexYUVToPlanar(dst, dstStride, dstVStride, &ycbcr, width, height);
             break;
+        case HAL_PIXEL_FORMAT_RGBX_8888:
         case HAL_PIXEL_FORMAT_RGBA_8888:
         case HAL_PIXEL_FORMAT_BGRA_8888:
             ConvertRGB32ToPlanar(
@@ -642,7 +639,7 @@
             break;
     }
 
-    if (grmodule->unlock(grmodule, handle) != OK) {
+    if (mapper.unlock(handle) != OK) {
         ALOGE("Unable to unlock image buffer %p for access", handle);
     }
 
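
The hunk above replaces direct gralloc-module calls with the
GraphicBufferMapper singleton. A minimal sketch of the resulting
lock/unlock pattern, assuming the standard <ui/GraphicBufferMapper.h> API
(the helper name is illustrative):

    #include <ui/GraphicBufferMapper.h>
    #include <ui/Rect.h>
    #include <hardware/gralloc.h>  // GRALLOC_USAGE_* flags

    namespace android {

    // Lock a buffer for CPU reads, consume it, then unlock it again.
    status_t withLockedBuffer(buffer_handle_t handle, uint32_t w, uint32_t h) {
        GraphicBufferMapper &mapper = GraphicBufferMapper::get();

        void *bits = NULL;
        status_t res = mapper.lock(
                handle,
                GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_NEVER,
                Rect(w, h), &bits);
        if (res != OK) {
            return res;
        }
        // ... read pixel data through |bits| here ...
        return mapper.unlock(handle);
    }

    }  // namespace android
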
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 644b6ed..50bb0de 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -27,7 +27,7 @@
 #include <binder/IServiceManager.h>
 #include <binder/MemoryDealer.h>
 #include <media/IMediaHTTPService.h>
-#include <media/IMediaPlayerService.h>
+#include <media/IMediaCodecService.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/DataSource.h>
@@ -37,7 +37,7 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/SimpleDecodingSource.h>
 
 #define DEFAULT_TIMEOUT         500000
 
@@ -57,8 +57,8 @@
 
 status_t Harness::initOMX() {
     sp<IServiceManager> sm = defaultServiceManager();
-    sp<IBinder> binder = sm->getService(String16("media.player"));
-    sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
+    sp<IBinder> binder = sm->getService(String16("media.codec"));
+    sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
     mOMX = service->getOMX();
 
     return mOMX != 0 ? OK : NO_INIT;
@@ -244,7 +244,7 @@
     NodeReaper &operator=(const NodeReaper &);
 };
 
-static sp<MediaExtractor> CreateExtractorFromURI(const char *uri) {
+static sp<IMediaExtractor> CreateExtractorFromURI(const char *uri) {
     sp<DataSource> source =
         DataSource::CreateFromURI(NULL /* httpService */, uri);
 
@@ -267,7 +267,7 @@
     IOMX::node_id node;
 
     status_t err =
-        mOMX->allocateNode(componentName, this, &node);
+        mOMX->allocateNode(componentName, this, NULL, &node);
     EXPECT_SUCCESS(err, "allocateNode");
 
     NodeReaper reaper(this, node);
@@ -492,14 +492,14 @@
     return NULL;
 }
 
-static sp<MediaSource> CreateSourceForMime(const char *mime) {
+static sp<IMediaSource> CreateSourceForMime(const char *mime) {
     const char *url = GetURLForMime(mime);
 
     if (url == NULL) {
         return NULL;
     }
 
-    sp<MediaExtractor> extractor = CreateExtractorFromURI(url);
+    sp<IMediaExtractor> extractor = CreateExtractorFromURI(url);
 
     if (extractor == NULL) {
         return NULL;
@@ -559,7 +559,7 @@
         return OK;
     }
 
-    sp<MediaSource> source = CreateSourceForMime(mime);
+    sp<IMediaSource> source = CreateSourceForMime(mime);
 
     if (source == NULL) {
         printf("  * Unable to open test content for type '%s', "
@@ -569,16 +569,15 @@
         return OK;
     }
 
-    sp<MediaSource> seekSource = CreateSourceForMime(mime);
+    sp<IMediaSource> seekSource = CreateSourceForMime(mime);
     if (source == NULL || seekSource == NULL) {
         return UNKNOWN_ERROR;
     }
 
     CHECK_EQ(seekSource->start(), (status_t)OK);
 
-    sp<MediaSource> codec = OMXCodec::Create(
-            mOMX, source->getFormat(), false /* createEncoder */,
-            source, componentName);
+    sp<IMediaSource> codec = SimpleDecodingSource::Create(
+            source, 0 /* flags */, NULL /* nativeWindow */, componentName);
 
     CHECK(codec != NULL);
 
diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
index a1a6576..82a0631 100644
--- a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
@@ -379,7 +379,10 @@
                 unsigned muxSlotLengthBytes = 0;
                 unsigned tmp;
                 do {
-                    CHECK_LT(offset, buffer->size());
+                    if (offset >= buffer->size()) {
+                        ALOGW("Malformed buffer received");
+                        return out;
+                    }
                     tmp = ptr[offset++];
                     muxSlotLengthBytes += tmp;
                 } while (tmp == 0xff);
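
For reference, this length field is coded as a run of bytes that are summed
until one is not 0xff; the fix above bails out gracefully on truncated input
instead of aborting via CHECK. A standalone sketch of that decoding, under
the same coding assumption (the helper name is illustrative):

    #include <cstddef>
    #include <cstdint>

    // Returns false if the buffer ends before the length field terminates.
    // The decoded length is the sum of all bytes read, including the first
    // byte that is not 0xff.
    static bool parseMuxSlotLength(
            const uint8_t *data, size_t size, size_t *offset, unsigned *length) {
        unsigned total = 0;
        unsigned tmp;
        do {
            if (*offset >= size) {
                return false;  // malformed: field runs past the buffer
            }
            tmp = data[(*offset)++];
            total += tmp;
        } while (tmp == 0xff);
        *length = total;
        return true;
    }
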
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index d7c3bd6..576a0a4 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -116,8 +116,15 @@
     // to the highest sequence number (extended to 32 bits) received so far.
 
     uint32_t seq1 = seqNum | (mHighestSeqNumber & 0xffff0000);
-    uint32_t seq2 = seqNum | ((mHighestSeqNumber & 0xffff0000) + 0x10000);
-    uint32_t seq3 = seqNum | ((mHighestSeqNumber & 0xffff0000) - 0x10000);
+
+    // non-overflowing version of:
+    // uint32_t seq2 = seqNum | ((mHighestSeqNumber & 0xffff0000) + 0x10000);
+    uint32_t seq2 = seqNum | (((mHighestSeqNumber >> 16) + 1) << 16);
+
+    // non-underflowing version of:
+    // uint32_t seq3 = seqNum | ((mHighestSeqNumber & 0xffff0000) - 0x10000);
+    uint32_t seq3 = seqNum | ((((mHighestSeqNumber >> 16) | 0x10000) - 1) << 16);
+
     uint32_t diff1 = AbsDiff(seq1, mHighestSeqNumber);
     uint32_t diff2 = AbsDiff(seq2, mHighestSeqNumber);
     uint32_t diff3 = AbsDiff(seq3, mHighestSeqNumber);
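
The rewritten candidates step the 16-bit epoch with shifts rather than with
32-bit additions or subtractions that could wrap, which matters now that
this library is built with the integer-overflow sanitizers. A worked sketch
of the three candidates and the selection (helper names are illustrative):

    #include <cstdint>

    static uint32_t AbsDiff(uint32_t a, uint32_t b) {
        return a > b ? a - b : b - a;
    }

    // Extend a 16-bit sequence number against the highest extended value
    // seen so far, mirroring the arithmetic above.
    static uint32_t extendSeq(uint32_t seqNum, uint32_t highest) {
        uint32_t seq1 = seqNum | (highest & 0xffff0000);                    // same epoch
        uint32_t seq2 = seqNum | (((highest >> 16) + 1) << 16);             // next epoch
        uint32_t seq3 = seqNum | ((((highest >> 16) | 0x10000) - 1) << 16); // previous epoch
        uint32_t best = seq1;
        if (AbsDiff(seq2, highest) < AbsDiff(best, highest)) best = seq2;
        if (AbsDiff(seq3, highest) < AbsDiff(best, highest)) best = seq3;
        // e.g. highest = 0x0000fff0, seqNum = 0x0005: seq2 = 0x00010005 wins,
        // correctly treating the packet as just past a 16-bit wraparound.
        return best;
    }
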
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index 56c4aa6..1f6b6f7 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -104,7 +104,7 @@
     mFd = -1;
 }
 
-status_t ARTPWriter::addSource(const sp<MediaSource> &source) {
+status_t ARTPWriter::addSource(const sp<IMediaSource> &source) {
     mSource = source;
     return OK;
 }
diff --git a/media/libstagefright/rtsp/ARTPWriter.h b/media/libstagefright/rtsp/ARTPWriter.h
index be8bc13..62abd0a 100644
--- a/media/libstagefright/rtsp/ARTPWriter.h
+++ b/media/libstagefright/rtsp/ARTPWriter.h
@@ -37,7 +37,7 @@
 struct ARTPWriter : public MediaWriter {
     ARTPWriter(int fd);
 
-    virtual status_t addSource(const sp<MediaSource> &source);
+    virtual status_t addSource(const sp<IMediaSource> &source);
     virtual bool reachedEOS();
     virtual status_t start(MetaData *params);
     virtual status_t stop();
@@ -72,7 +72,7 @@
     int mRTCPFd;
 #endif
 
-    sp<MediaSource> mSource;
+    sp<IMediaSource> mSource;
     sp<ALooper> mLooper;
     sp<AHandlerReflector<ARTPWriter> > mReflector;
 
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 855ffdc..5620cf8 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -898,11 +898,6 @@
     if (!strncmp(value.c_str(), "Basic", 5)) {
         mAuthType = BASIC;
     } else {
-#if !defined(HAVE_ANDROID_OS)
-        // We don't have access to the MD5 implementation on the simulator,
-        // so we won't support digest authentication.
-        return false;
-#endif
 
         CHECK(!strncmp(value.c_str(), "Digest", 6));
         mAuthType = DIGEST;
@@ -919,7 +914,6 @@
     return true;
 }
 
-#if defined(HAVE_ANDROID_OS)
 static void H(const AString &s, AString *out) {
     out->clear();
 
@@ -948,7 +942,6 @@
         out->append(&nibble, 1);
     }
 }
-#endif
 
 static void GetMethodAndURL(
         const AString &request, AString *method, AString *url) {
@@ -990,7 +983,6 @@
         return;
     }
 
-#if defined(HAVE_ANDROID_OS)
     CHECK_EQ((int)mAuthType, (int)DIGEST);
 
     AString method, url;
@@ -1039,7 +1031,6 @@
     fragment.append("\r\n");
 
     request->insert(fragment, i + 2);
-#endif
 }
 
 void ARTSPConnection::addUserAgent(AString *request) const {
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index c5e8c35..bdda19c 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -33,6 +33,7 @@
 
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
 
@@ -42,21 +43,23 @@
 
 include $(CLEAR_VARS)
 
-LOCAL_SRC_FILES:=         \
-        rtp_test.cpp
+LOCAL_SRC_FILES := \
+	rtp_test.cpp \
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder libstagefright_foundation
+	libstagefright liblog libutils libbinder libstagefright_foundation libmedia
 
 LOCAL_STATIC_LIBRARIES := \
-        libstagefright_rtsp
+	libstagefright_rtsp
 
-LOCAL_C_INCLUDES:= \
+LOCAL_C_INCLUDES := \
 	frameworks/av/media/libstagefright \
+	frameworks/av/cmds/stagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE_TAGS := optional
 
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 0d0baf3..f9a9ab9 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -64,6 +64,10 @@
 
 static int64_t kPauseDelayUs = 3000000ll;
 
+// The maximum number of stale access units allowed at the beginning of
+// a new sequence.
+static int32_t kMaxAllowedStaleAccessUnits = 20;
+
 namespace android {
 
 static bool GetAttribute(const char *s, const char *key, AString *value) {
@@ -235,7 +239,7 @@
         sp<AMessage> msg = new AMessage('paus', this);
         mPauseGeneration++;
         msg->setInt32("pausecheck", mPauseGeneration);
-        msg->post(kPauseDelayUs);
+        msg->post();
     }
 
     void resume() {
@@ -979,6 +983,11 @@
 
             case 'accu':
             {
+                if (mSeekPending) {
+                    ALOGV("Stale access unit.");
+                    break;
+                }
+
                 int32_t timeUpdate;
                 if (msg->findInt32("time-update", &timeUpdate) && timeUpdate) {
                     size_t trackIndex;
@@ -1043,16 +1052,39 @@
                     break;
                 }
 
+                if (track->mNewSegment) {
+                    // The sequence number in an RTP packet is only 16 bits wide and is
+                    // extended to 32 bits by ARTPSource. Only the low 16 bits of the seq
+                    // value in the RTP-Info header of the RTSP "PLAY" reply should be
+                    // used to detect the first RTP packet after seeking.
+                    if (track->mAllowedStaleAccessUnits > 0) {
+                        if (((seqNum ^ track->mFirstSeqNumInSegment) & 0xffff) != 0) {
+                            // Not the first RTP packet of the stream after seeking; discard it.
+                            track->mAllowedStaleAccessUnits--;
+                            ALOGV("discarding stale access unit (0x%x : 0x%x)",
+                                 seqNum, track->mFirstSeqNumInSegment);
+                            break;
+                        }
+                    } else { // track->mAllowedStaleAccessUnits <= 0
+                        mNumAccessUnitsReceived = 0;
+                        ALOGW_IF(track->mAllowedStaleAccessUnits == 0,
+                             "Still no first rtp packet after %d stale ones",
+                             kMaxAllowedStaleAccessUnits);
+                        track->mAllowedStaleAccessUnits = -1;
+                        break;
+                    }
+
+                    // Found the first RTP packet of the stream after seeking.
+                    track->mFirstSeqNumInSegment = seqNum;
+                    track->mNewSegment = false;
+                }
+
                 if (seqNum < track->mFirstSeqNumInSegment) {
                     ALOGV("dropping stale access-unit (%d < %d)",
                          seqNum, track->mFirstSeqNumInSegment);
                     break;
                 }
 
-                if (track->mNewSegment) {
-                    track->mNewSegment = false;
-                }
-
                 onAccessUnitComplete(trackIndex, accessUnit);
                 break;
             }
@@ -1070,6 +1102,12 @@
                     ALOGW("This is a live stream, ignoring pause request.");
                     break;
                 }
+
+                if (mPausing) {
+                    ALOGV("This stream is already paused.");
+                    break;
+                }
+
                 mCheckPending = true;
                 ++mCheckGeneration;
                 mPausing = true;
@@ -1131,10 +1169,11 @@
                 int32_t result;
                 CHECK(msg->findInt32("result", &result));
 
-                ALOGI("PLAY completed with result %d (%s)",
+                ALOGI("PLAY (for resume) completed with result %d (%s)",
                      result, strerror(-result));
 
                 mCheckPending = false;
+                ++mCheckGeneration;
                 postAccessUnitTimeoutCheck();
 
                 if (result == OK) {
@@ -1282,10 +1321,11 @@
                 int32_t result;
                 CHECK(msg->findInt32("result", &result));
 
-                ALOGI("PLAY completed with result %d (%s)",
+                ALOGI("PLAY (for seek) completed with result %d (%s)",
                      result, strerror(-result));
 
                 mCheckPending = false;
+                ++mCheckGeneration;
                 postAccessUnitTimeoutCheck();
 
                 if (result == OK) {
@@ -1323,6 +1363,12 @@
                 mPausing = false;
                 mSeekPending = false;
 
+                // Discard all stale access units.
+                for (size_t i = 0; i < mTracks.size(); ++i) {
+                    TrackInfo *track = &mTracks.editItemAt(i);
+                    track->mPackets.clear();
+                }
+
                 sp<AMessage> msg = mNotify->dup();
                 msg->setInt32("what", kWhatSeekDone);
                 msg->post();
@@ -1484,6 +1530,7 @@
             TrackInfo *info = &mTracks.editItemAt(trackIndex);
             info->mFirstSeqNumInSegment = seq;
             info->mNewSegment = true;
+            info->mAllowedStaleAccessUnits = kMaxAllowedStaleAccessUnits;
 
             CHECK(GetAttribute((*it).c_str(), "rtptime", &val));
 
@@ -1527,6 +1574,7 @@
         bool mUsingInterleavedTCP;
         uint32_t mFirstSeqNumInSegment;
         bool mNewSegment;
+        int32_t mAllowedStaleAccessUnits;
 
         uint32_t mRTPAnchor;
         int64_t mNTPAnchorUs;
@@ -1610,6 +1658,7 @@
         info->mUsingInterleavedTCP = false;
         info->mFirstSeqNumInSegment = 0;
         info->mNewSegment = true;
+        info->mAllowedStaleAccessUnits = kMaxAllowedStaleAccessUnits;
         info->mRTPSocket = -1;
         info->mRTCPSocket = -1;
         info->mRTPAnchor = 0;
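
A small sketch of the low-16-bit comparison used above to detect the first
RTP packet of a new segment: ARTPSource extends sequence numbers to 32 bits,
while the seq value in RTP-Info is only 16 bits wide (names illustrative):

    #include <cstdint>

    // True when the extended 32-bit sequence number matches the 16-bit
    // seq advertised in the RTSP PLAY reply's RTP-Info header.
    static bool isFirstPacketAfterSeek(uint32_t extendedSeq, uint32_t rtpInfoSeq) {
        return ((extendedSeq ^ rtpInfoSeq) & 0xffff) == 0;
    }
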
diff --git a/media/libstagefright/rtsp/MyTransmitter.h b/media/libstagefright/rtsp/MyTransmitter.h
index 369f276..bf44aff 100644
--- a/media/libstagefright/rtsp/MyTransmitter.h
+++ b/media/libstagefright/rtsp/MyTransmitter.h
@@ -31,9 +31,10 @@
 
 #ifdef ANDROID
 #include "VideoSource.h"
-
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodecSource.h>
 #endif
 
 namespace android {
@@ -109,17 +110,19 @@
 
         sp<MediaSource> source = new VideoSource(width, height);
 
-        sp<MetaData> encMeta = new MetaData;
-        encMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
-        encMeta->setInt32(kKeyWidth, width);
-        encMeta->setInt32(kKeyHeight, height);
+        sp<AMessage> encMeta = new AMessage;
+        encMeta->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
+        encMeta->setInt32("width", width);
+        encMeta->setInt32("height", height);
+        encMeta->setInt32("frame-rate", 30);
+        encMeta->setInt32("bitrate", 256000);
+        encMeta->setInt32("i-frame-interval", 10);
 
-        OMXClient client;
-        client.connect();
+        sp<ALooper> encLooper = new ALooper;
+        encLooper->setName("rtsp_transmitter");
+        encLooper->start();
 
-        mEncoder = OMXCodec::Create(
-                client.interface(), encMeta,
-                true /* createEncoder */, source);
+        mEncoder = MediaCodecSource::Create(encLooper, encMeta, source);
 
         mEncoder->start();
 
diff --git a/media/libstagefright/rtsp/rtp_test.cpp b/media/libstagefright/rtsp/rtp_test.cpp
index d43cd2a..24f529b 100644
--- a/media/libstagefright/rtsp/rtp_test.cpp
+++ b/media/libstagefright/rtsp/rtp_test.cpp
@@ -20,13 +20,13 @@
 
 #include <binder/ProcessState.h>
 
+#include <media/stagefright/foundation/base64.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
-#include <media/stagefright/foundation/base64.h>
+#include <media/stagefright/SimpleDecodingSource.h>
 
 #include "ARTPSession.h"
 #include "ASessionDescription.h"
@@ -178,15 +178,8 @@
     CHECK_EQ(session->countTracks(), 1u);
     sp<MediaSource> source = session->trackAt(0);
 
-    OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
-
-    sp<MediaSource> decoder = OMXCodec::Create(
-            client.interface(),
-            source->getFormat(), false /* createEncoder */,
-            source,
-            NULL,
-            0);  // OMXCodec::kPreferSoftwareCodecs);
+    sp<MediaSource> decoder = SimpleDecodingSource::Create(
+            source, 0 /* flags: ACodec::kPreferSoftwareCodecs */);
     CHECK(decoder != NULL);
 
     CHECK_EQ(decoder->start(), (status_t)OK);
@@ -213,7 +206,7 @@
             int64_t timeUs;
             CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
 
-            printf("decoder returned frame of size %d at time %.2f secs\n",
+            printf("decoder returned frame of size %zu at time %.2f secs\n",
                    buffer->range_length(), timeUs / 1E6);
         }
 #endif
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index 111e6c5..d1c9d36 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -30,6 +30,7 @@
 	frameworks/av/media/libstagefright \
 	frameworks/av/media/libstagefright/include \
 	$(TOP)/frameworks/native/include/media/openmax \
+	$(TOP)/frameworks/native/include/media/hardware \
 
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index 3860e9b..ad1e684 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -38,11 +38,6 @@
 #include <binder/ProcessState.h>
 
 #include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/OMXClient.h>
-#include <media/stagefright/OMXCodec.h>
 #include <OMX_Component.h>
 
 #include "DummyRecorder.h"
diff --git a/media/libstagefright/timedtext/Android.mk b/media/libstagefright/timedtext/Android.mk
index 58fb12f..f2c6365 100644
--- a/media/libstagefright/timedtext/Android.mk
+++ b/media/libstagefright/timedtext/Android.mk
@@ -3,14 +3,10 @@
 
 LOCAL_SRC_FILES:=                 \
         TextDescriptions.cpp      \
-        TimedTextDriver.cpp       \
-        TimedText3GPPSource.cpp \
-        TimedTextSource.cpp       \
-        TimedTextSRTSource.cpp    \
-        TimedTextPlayer.cpp
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_C_INCLUDES:= \
         $(TOP)/frameworks/av/include/media/stagefright/timedtext \
diff --git a/media/libstagefright/timedtext/TimedText3GPPSource.cpp b/media/libstagefright/timedtext/TimedText3GPPSource.cpp
deleted file mode 100644
index 4854121..0000000
--- a/media/libstagefright/timedtext/TimedText3GPPSource.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
- /*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "TimedText3GPPSource"
-#include <utils/Log.h>
-
-#include <binder/Parcel.h>
-#include <media/stagefright/foundation/ADebug.h>  // CHECK_XX macro
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>  // for MEDIA_MIMETYPE_xxx
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-#include "TimedText3GPPSource.h"
-#include "TextDescriptions.h"
-
-namespace android {
-
-TimedText3GPPSource::TimedText3GPPSource(const sp<MediaSource>& mediaSource)
-    : mSource(mediaSource) {
-}
-
-TimedText3GPPSource::~TimedText3GPPSource() {
-}
-
-status_t TimedText3GPPSource::read(
-        int64_t *startTimeUs, int64_t *endTimeUs, Parcel *parcel,
-        const MediaSource::ReadOptions *options) {
-    MediaBuffer *textBuffer = NULL;
-    status_t err = mSource->read(&textBuffer, options);
-    if (err != OK) {
-        return err;
-    }
-    CHECK(textBuffer != NULL);
-    textBuffer->meta_data()->findInt64(kKeyTime, startTimeUs);
-    CHECK_GE(*startTimeUs, 0);
-    extractAndAppendLocalDescriptions(*startTimeUs, textBuffer, parcel);
-    textBuffer->release();
-    // endTimeUs is a dummy parameter for 3gpp timed text format.
-    // Set a negative value to it to mark it is unavailable.
-    *endTimeUs = -1;
-    return OK;
-}
-
-// Each text sample consists of a string of text, optionally with sample
-// modifier description. The modifier description could specify a new
-// text style for the string of text. These descriptions are present only
-// if they are needed. This method is used to extract the modifier
-// description and append it at the end of the text.
-status_t TimedText3GPPSource::extractAndAppendLocalDescriptions(
-        int64_t timeUs, const MediaBuffer *textBuffer, Parcel *parcel) {
-    const void *data;
-    size_t size = 0;
-    int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS;
-
-    const char *mime;
-    CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
-    CHECK(strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0);
-
-    data = textBuffer->data();
-    size = textBuffer->size();
-
-    if (size > 0) {
-      parcel->freeData();
-      flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
-      return TextDescriptions::getParcelOfDescriptions(
-          (const uint8_t *)data, size, flag, timeUs / 1000, parcel);
-    }
-    return OK;
-}
-
-// To extract and send the global text descriptions for all the text samples
-// in the text track or text file.
-// TODO: send error message to application via notifyListener()...?
-status_t TimedText3GPPSource::extractGlobalDescriptions(Parcel *parcel) {
-    const void *data;
-    size_t size = 0;
-    int32_t flag = TextDescriptions::GLOBAL_DESCRIPTIONS;
-
-    const char *mime;
-    CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
-    CHECK(strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0);
-
-    uint32_t type;
-    // get the 'tx3g' box content. This box contains the text descriptions
-    // used to render the text track
-    if (!mSource->getFormat()->findData(
-            kKeyTextFormatData, &type, &data, &size)) {
-        return ERROR_MALFORMED;
-    }
-
-    if (size > 0) {
-        flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
-        return TextDescriptions::getParcelOfDescriptions(
-                (const uint8_t *)data, size, flag, 0, parcel);
-    }
-    return OK;
-}
-
-sp<MetaData> TimedText3GPPSource::getFormat() {
-    return mSource->getFormat();
-}
-
-}  // namespace android
diff --git a/media/libstagefright/timedtext/TimedText3GPPSource.h b/media/libstagefright/timedtext/TimedText3GPPSource.h
deleted file mode 100644
index 4170940..0000000
--- a/media/libstagefright/timedtext/TimedText3GPPSource.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TIMED_TEXT_3GPP_SOURCE_H_
-#define TIMED_TEXT_3GPP_SOURCE_H_
-
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-
-#include "TimedTextSource.h"
-
-namespace android {
-
-class MediaBuffer;
-class Parcel;
-
-class TimedText3GPPSource : public TimedTextSource {
-public:
-    TimedText3GPPSource(const sp<MediaSource>& mediaSource);
-    virtual status_t start() { return mSource->start(); }
-    virtual status_t stop() { return mSource->stop(); }
-    virtual status_t read(
-            int64_t *startTimeUs,
-            int64_t *endTimeUs,
-            Parcel *parcel,
-            const MediaSource::ReadOptions *options = NULL);
-    virtual status_t extractGlobalDescriptions(Parcel *parcel);
-    virtual sp<MetaData> getFormat();
-
-protected:
-    virtual ~TimedText3GPPSource();
-
-private:
-    sp<MediaSource> mSource;
-
-    status_t extractAndAppendLocalDescriptions(
-            int64_t timeUs, const MediaBuffer *textBuffer, Parcel *parcel);
-
-    DISALLOW_EVIL_CONSTRUCTORS(TimedText3GPPSource);
-};
-
-}  // namespace android
-
-#endif  // TIMED_TEXT_3GPP_SOURCE_H_
diff --git a/media/libstagefright/timedtext/TimedTextDriver.cpp b/media/libstagefright/timedtext/TimedTextDriver.cpp
deleted file mode 100644
index 55a9803..0000000
--- a/media/libstagefright/timedtext/TimedTextDriver.cpp
+++ /dev/null
@@ -1,287 +0,0 @@
- /*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "TimedTextDriver"
-#include <utils/Log.h>
-
-#include <binder/IPCThreadState.h>
-
-#include <media/IMediaHTTPService.h>
-#include <media/mediaplayer.h>
-#include <media/MediaPlayerInterface.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/timedtext/TimedTextDriver.h>
-
-#include "TextDescriptions.h"
-#include "TimedTextPlayer.h"
-#include "TimedTextSource.h"
-
-namespace android {
-
-TimedTextDriver::TimedTextDriver(
-        const wp<MediaPlayerBase> &listener,
-        const sp<IMediaHTTPService> &httpService)
-    : mLooper(new ALooper),
-      mListener(listener),
-      mHTTPService(httpService),
-      mState(UNINITIALIZED),
-      mCurrentTrackIndex(UINT_MAX) {
-    mLooper->setName("TimedTextDriver");
-    mLooper->start();
-    mPlayer = new TimedTextPlayer(listener);
-    mLooper->registerHandler(mPlayer);
-}
-
-TimedTextDriver::~TimedTextDriver() {
-    mTextSourceVector.clear();
-    mTextSourceTypeVector.clear();
-    mLooper->stop();
-}
-
-status_t TimedTextDriver::selectTrack_l(size_t index) {
-    if (mCurrentTrackIndex == index) {
-        return OK;
-    }
-    sp<TimedTextSource> source;
-    source = mTextSourceVector.valueFor(index);
-    mPlayer->setDataSource(source);
-    if (mState == UNINITIALIZED) {
-        mState = PREPARED;
-    }
-    mCurrentTrackIndex = index;
-    return OK;
-}
-
-status_t TimedTextDriver::start() {
-    Mutex::Autolock autoLock(mLock);
-    switch (mState) {
-        case UNINITIALIZED:
-            return INVALID_OPERATION;
-        case PLAYING:
-            return OK;
-        case PREPARED:
-            mPlayer->start();
-            mState = PLAYING;
-            return OK;
-        case PAUSED:
-            mPlayer->resume();
-            mState = PLAYING;
-            return OK;
-        default:
-            TRESPASS();
-    }
-    return UNKNOWN_ERROR;
-}
-
-status_t TimedTextDriver::pause() {
-    Mutex::Autolock autoLock(mLock);
-    ALOGV("%s() is called", __FUNCTION__);
-    switch (mState) {
-        case UNINITIALIZED:
-            return INVALID_OPERATION;
-        case PLAYING:
-            mPlayer->pause();
-            mState = PAUSED;
-            return OK;
-        case PREPARED:
-            return INVALID_OPERATION;
-        case PAUSED:
-            return OK;
-        default:
-            TRESPASS();
-    }
-    return UNKNOWN_ERROR;
-}
-
-status_t TimedTextDriver::selectTrack(size_t index) {
-    status_t ret = OK;
-    Mutex::Autolock autoLock(mLock);
-    ALOGV("%s() is called", __FUNCTION__);
-    switch (mState) {
-        case UNINITIALIZED:
-        case PREPARED:
-        case PAUSED:
-            ret = selectTrack_l(index);
-            break;
-        case PLAYING:
-            mPlayer->pause();
-            ret = selectTrack_l(index);
-            if (ret != OK) {
-                break;
-            }
-            mPlayer->start();
-            break;
-        default:
-            TRESPASS();
-    }
-    return ret;
-}
-
-status_t TimedTextDriver::unselectTrack(size_t index) {
-    Mutex::Autolock autoLock(mLock);
-    ALOGV("%s() is called", __FUNCTION__);
-    if (mCurrentTrackIndex != index) {
-        return INVALID_OPERATION;
-    }
-    mCurrentTrackIndex = UINT_MAX;
-    switch (mState) {
-        case UNINITIALIZED:
-            return INVALID_OPERATION;
-        case PLAYING:
-            mPlayer->setDataSource(NULL);
-            mState = UNINITIALIZED;
-            return OK;
-        case PREPARED:
-        case PAUSED:
-            mState = UNINITIALIZED;
-            return OK;
-        default:
-            TRESPASS();
-    }
-    return UNKNOWN_ERROR;
-}
-
-status_t TimedTextDriver::seekToAsync(int64_t timeUs) {
-    Mutex::Autolock autoLock(mLock);
-    ALOGV("%s() is called", __FUNCTION__);
-    switch (mState) {
-        case UNINITIALIZED:
-            return INVALID_OPERATION;
-        case PREPARED:
-            mPlayer->seekToAsync(timeUs);
-            mPlayer->pause();
-            mState = PAUSED;
-            return OK;
-        case PAUSED:
-            mPlayer->seekToAsync(timeUs);
-            mPlayer->pause();
-            return OK;
-        case PLAYING:
-            mPlayer->seekToAsync(timeUs);
-            return OK;
-        default:
-            TRESPASS();
-    }
-    return UNKNOWN_ERROR;
-}
-
-status_t TimedTextDriver::addInBandTextSource(
-        size_t trackIndex, const sp<MediaSource>& mediaSource) {
-    sp<TimedTextSource> source =
-            TimedTextSource::CreateTimedTextSource(mediaSource);
-    if (source == NULL) {
-        return ERROR_UNSUPPORTED;
-    }
-    Mutex::Autolock autoLock(mLock);
-    mTextSourceVector.add(trackIndex, source);
-    mTextSourceTypeVector.add(TEXT_SOURCE_TYPE_IN_BAND);
-    return OK;
-}
-
-status_t TimedTextDriver::addOutOfBandTextSource(
-        size_t trackIndex, const char *uri, const char *mimeType) {
-
-    // Only local subtitle files are supported for now.
-    if (strncasecmp("file://", uri, 7)) {
-        ALOGE("uri('%s') is not a file", uri);
-        return ERROR_UNSUPPORTED;
-    }
-
-    sp<DataSource> dataSource =
-            DataSource::CreateFromURI(mHTTPService, uri);
-    return createOutOfBandTextSource(trackIndex, mimeType, dataSource);
-}
-
-status_t TimedTextDriver::addOutOfBandTextSource(
-        size_t trackIndex, int fd, off64_t offset, off64_t length, const char *mimeType) {
-
-    if (fd < 0) {
-        ALOGE("Invalid file descriptor: %d", fd);
-        return ERROR_UNSUPPORTED;
-    }
-
-    sp<DataSource> dataSource = new FileSource(dup(fd), offset, length);
-    return createOutOfBandTextSource(trackIndex, mimeType, dataSource);
-}
-
-status_t TimedTextDriver::createOutOfBandTextSource(
-        size_t trackIndex,
-        const char *mimeType,
-        const sp<DataSource>& dataSource) {
-
-    if (dataSource == NULL) {
-        return ERROR_UNSUPPORTED;
-    }
-
-    sp<TimedTextSource> source;
-    if (strcasecmp(mimeType, MEDIA_MIMETYPE_TEXT_SUBRIP) == 0) {
-        source = TimedTextSource::CreateTimedTextSource(
-                dataSource, TimedTextSource::OUT_OF_BAND_FILE_SRT);
-    }
-
-    if (source == NULL) {
-        ALOGE("Failed to create timed text source");
-        return ERROR_UNSUPPORTED;
-    }
-
-    Mutex::Autolock autoLock(mLock);
-    mTextSourceVector.add(trackIndex, source);
-    mTextSourceTypeVector.add(TEXT_SOURCE_TYPE_OUT_OF_BAND);
-    return OK;
-}
-
-size_t TimedTextDriver::countExternalTracks() const {
-    size_t nTracks = 0;
-    for (size_t i = 0, n = mTextSourceTypeVector.size(); i < n; ++i) {
-        if (mTextSourceTypeVector[i] == TEXT_SOURCE_TYPE_OUT_OF_BAND) {
-            ++nTracks;
-        }
-    }
-    return nTracks;
-}
-
-void TimedTextDriver::getExternalTrackInfo(Parcel *parcel) {
-    Mutex::Autolock autoLock(mLock);
-    for (size_t i = 0, n = mTextSourceTypeVector.size(); i < n; ++i) {
-        if (mTextSourceTypeVector[i] == TEXT_SOURCE_TYPE_IN_BAND) {
-            continue;
-        }
-
-        sp<MetaData> meta = mTextSourceVector.valueAt(i)->getFormat();
-
-        // There are two fields.
-        parcel->writeInt32(2);
-
-        // track type.
-        parcel->writeInt32(MEDIA_TRACK_TYPE_TIMEDTEXT);
-        const char *lang = "und";
-        if (meta != NULL) {
-            meta->findCString(kKeyMediaLanguage, &lang);
-        }
-        parcel->writeString16(String16(lang));
-    }
-}
-
-}  // namespace android
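
The deleted driver serialized every transition under mLock and modeled playback
as a four-state machine. A condensed transition table recovered from the switch
statements above (informational sketch, not part of the patch):

    // State          start()             pause()             selectTrack(i)
    // UNINITIALIZED  INVALID_OPERATION   INVALID_OPERATION   select; -> PREPARED
    // PREPARED       play; -> PLAYING    INVALID_OPERATION   select in place
    // PLAYING        OK (no-op)          pause; -> PAUSED    pause, select, restart
    // PAUSED         resume; -> PLAYING  OK (no-op)          select in place
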
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.cpp b/media/libstagefright/timedtext/TimedTextPlayer.cpp
deleted file mode 100644
index aecf666..0000000
--- a/media/libstagefright/timedtext/TimedTextPlayer.cpp
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "TimedTextPlayer"
-#include <utils/Log.h>
-
-#include <inttypes.h>
-#include <limits.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/timedtext/TimedTextDriver.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/MediaPlayerInterface.h>
-
-#include "TimedTextPlayer.h"
-
-#include "TimedTextSource.h"
-
-namespace android {
-
-// Events should be fired a bit early to allow for the processing time before
-// the application actually receives the notification message.
-static const int64_t kAdjustmentProcessingTimeUs = 100000ll;
-static const int64_t kMaxDelayUs = 5000000ll;
-static const int64_t kWaitTimeUsToRetryRead = 100000ll;
-static const int64_t kInvalidTimeUs = INT_MIN;
-
-TimedTextPlayer::TimedTextPlayer(const wp<MediaPlayerBase> &listener)
-    : mListener(listener),
-      mSource(NULL),
-      mPendingSeekTimeUs(kInvalidTimeUs),
-      mPaused(false),
-      mSendSubtitleGeneration(0) {
-}
-
-TimedTextPlayer::~TimedTextPlayer() {
-    if (mSource != NULL) {
-        mSource->stop();
-        mSource.clear();
-        mSource = NULL;
-    }
-}
-
-void TimedTextPlayer::start() {
-    (new AMessage(kWhatStart, this))->post();
-}
-
-void TimedTextPlayer::pause() {
-    (new AMessage(kWhatPause, this))->post();
-}
-
-void TimedTextPlayer::resume() {
-    (new AMessage(kWhatResume, this))->post();
-}
-
-void TimedTextPlayer::seekToAsync(int64_t timeUs) {
-    sp<AMessage> msg = new AMessage(kWhatSeek, this);
-    msg->setInt64("seekTimeUs", timeUs);
-    msg->post();
-}
-
-void TimedTextPlayer::setDataSource(sp<TimedTextSource> source) {
-    sp<AMessage> msg = new AMessage(kWhatSetSource, this);
-    msg->setObject("source", source);
-    msg->post();
-}
-
-void TimedTextPlayer::onMessageReceived(const sp<AMessage> &msg) {
-    switch (msg->what()) {
-        case kWhatPause: {
-            mPaused = true;
-            break;
-        }
-        case kWhatResume: {
-            mPaused = false;
-            if (mPendingSeekTimeUs != kInvalidTimeUs) {
-                seekToAsync(mPendingSeekTimeUs);
-                mPendingSeekTimeUs = kInvalidTimeUs;
-            } else {
-                doRead();
-            }
-            break;
-        }
-        case kWhatStart: {
-            sp<MediaPlayerBase> listener = mListener.promote();
-            if (listener == NULL) {
-                ALOGE("Listener is NULL when kWhatStart is received.");
-                break;
-            }
-            mPaused = false;
-            mPendingSeekTimeUs = kInvalidTimeUs;
-            int32_t positionMs = 0;
-            listener->getCurrentPosition(&positionMs);
-            int64_t seekTimeUs = positionMs * 1000ll;
-
-            notifyListener();
-            mSendSubtitleGeneration++;
-            doSeekAndRead(seekTimeUs);
-            break;
-        }
-        case kWhatRetryRead: {
-            int32_t generation = -1;
-            CHECK(msg->findInt32("generation", &generation));
-            if (generation != mSendSubtitleGeneration) {
-                // Drop obsolete msg.
-                break;
-            }
-            int64_t seekTimeUs;
-            int seekMode;
-            if (msg->findInt64("seekTimeUs", &seekTimeUs) &&
-                msg->findInt32("seekMode", &seekMode)) {
-                MediaSource::ReadOptions options;
-                options.setSeekTo(
-                    seekTimeUs,
-                    static_cast<MediaSource::ReadOptions::SeekMode>(seekMode));
-                doRead(&options);
-            } else {
-                doRead();
-            }
-            break;
-        }
-        case kWhatSeek: {
-            int64_t seekTimeUs = kInvalidTimeUs;
-            // Clear a displayed timed text before seeking.
-            notifyListener();
-            msg->findInt64("seekTimeUs", &seekTimeUs);
-            if (seekTimeUs == kInvalidTimeUs) {
-                sp<MediaPlayerBase> listener = mListener.promote();
-                if (listener != NULL) {
-                    int32_t positionMs = 0;
-                    listener->getCurrentPosition(&positionMs);
-                    seekTimeUs = positionMs * 1000ll;
-                }
-            }
-            if (mPaused) {
-                mPendingSeekTimeUs = seekTimeUs;
-                break;
-            }
-            mSendSubtitleGeneration++;
-            doSeekAndRead(seekTimeUs);
-            break;
-        }
-        case kWhatSendSubtitle: {
-            int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
-            if (generation != mSendSubtitleGeneration) {
-                // Drop obsolete msg.
-                break;
-            }
-            // If the current time has not yet reached the fire time,
-            // re-post the message with an adjusted delay.
-            int64_t fireTimeUs = kInvalidTimeUs;
-            if (msg->findInt64("fireTimeUs", &fireTimeUs)) {
-                // TODO: check if fireTimeUs is not kInvalidTimeUs.
-                int64_t delayUs = delayUsFromCurrentTime(fireTimeUs);
-                if (delayUs > 0) {
-                    msg->post(delayUs);
-                    break;
-                }
-            }
-            sp<RefBase> obj;
-            if (msg->findObject("subtitle", &obj)) {
-                sp<ParcelEvent> parcelEvent;
-                parcelEvent = static_cast<ParcelEvent*>(obj.get());
-                notifyListener(&(parcelEvent->parcel));
-                doRead();
-            } else {
-                notifyListener();
-            }
-            break;
-        }
-        case kWhatSetSource: {
-            mSendSubtitleGeneration++;
-            sp<RefBase> obj;
-            msg->findObject("source", &obj);
-            if (mSource != NULL) {
-                mSource->stop();
-                mSource.clear();
-                mSource = NULL;
-            }
-            // A null source means the track is being deselected.
-            if (obj == NULL) {
-                mPendingSeekTimeUs = kInvalidTimeUs;
-                mPaused = false;
-                notifyListener();
-                break;
-            }
-            mSource = static_cast<TimedTextSource*>(obj.get());
-            status_t err = mSource->start();
-            if (err != OK) {
-                notifyError(err);
-                break;
-            }
-            Parcel parcel;
-            err = mSource->extractGlobalDescriptions(&parcel);
-            if (err != OK) {
-                notifyError(err);
-                break;
-            }
-            notifyListener(&parcel);
-            break;
-        }
-    }
-}
-
-void TimedTextPlayer::doSeekAndRead(int64_t seekTimeUs) {
-    MediaSource::ReadOptions options;
-    options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-    doRead(&options);
-}
-
-void TimedTextPlayer::doRead(MediaSource::ReadOptions* options) {
-    int64_t startTimeUs = 0;
-    int64_t endTimeUs = 0;
-    sp<ParcelEvent> parcelEvent = new ParcelEvent();
-    CHECK(mSource != NULL);
-    status_t err = mSource->read(&startTimeUs, &endTimeUs,
-                                 &(parcelEvent->parcel), options);
-    if (err == WOULD_BLOCK) {
-        sp<AMessage> msg = new AMessage(kWhatRetryRead, this);
-        if (options != NULL) {
-            int64_t seekTimeUs = kInvalidTimeUs;
-            MediaSource::ReadOptions::SeekMode seekMode =
-                MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC;
-            CHECK(options->getSeekTo(&seekTimeUs, &seekMode));
-            msg->setInt64("seekTimeUs", seekTimeUs);
-            msg->setInt32("seekMode", seekMode);
-        }
-        msg->setInt32("generation", mSendSubtitleGeneration);
-        msg->post(kWaitTimeUsToRetryRead);
-        return;
-    } else if (err != OK) {
-        notifyError(err);
-        return;
-    }
-
-    postTextEvent(parcelEvent, startTimeUs);
-    if (endTimeUs > 0) {
-        CHECK_GE(endTimeUs, startTimeUs);
-        // Send an empty timed text to clear the subtitle when playback
-        // reaches the end time.
-        postTextEvent(NULL, endTimeUs);
-    }
-}
-
-void TimedTextPlayer::postTextEvent(const sp<ParcelEvent>& parcel, int64_t timeUs) {
-    int64_t delayUs = delayUsFromCurrentTime(timeUs);
-    sp<AMessage> msg = new AMessage(kWhatSendSubtitle, this);
-    msg->setInt32("generation", mSendSubtitleGeneration);
-    if (parcel != NULL) {
-        msg->setObject("subtitle", parcel);
-    }
-    msg->setInt64("fireTimeUs", timeUs);
-    msg->post(delayUs);
-}
-
-int64_t TimedTextPlayer::delayUsFromCurrentTime(int64_t fireTimeUs) {
-    sp<MediaPlayerBase> listener = mListener.promote();
-    if (listener == NULL) {
-        // TODO: it may be better to return kInvalidTimeUs
-        ALOGE("%s: Listener is NULL. (fireTimeUs = %" PRId64" )",
-              __FUNCTION__, fireTimeUs);
-        return 0;
-    }
-    int32_t positionMs = 0;
-    listener->getCurrentPosition(&positionMs);
-    int64_t positionUs = positionMs * 1000ll;
-
-    if (fireTimeUs <= positionUs + kAdjustmentProcessingTimeUs) {
-        return 0;
-    } else {
-        int64_t delayUs = fireTimeUs - positionUs - kAdjustmentProcessingTimeUs;
-        if (delayUs > kMaxDelayUs) {
-            return kMaxDelayUs;
-        }
-        return delayUs;
-    }
-}
-
-void TimedTextPlayer::notifyError(int error) {
-    sp<MediaPlayerBase> listener = mListener.promote();
-    if (listener == NULL) {
-        ALOGE("%s(error=%d): Listener is NULL.", __FUNCTION__, error);
-        return;
-    }
-    listener->sendEvent(MEDIA_INFO, MEDIA_INFO_TIMED_TEXT_ERROR, error);
-}
-
-void TimedTextPlayer::notifyListener(const Parcel *parcel) {
-    sp<MediaPlayerBase> listener = mListener.promote();
-    if (listener == NULL) {
-        ALOGE("%s: Listener is NULL.", __FUNCTION__);
-        return;
-    }
-    if (parcel != NULL && (parcel->dataSize() > 0)) {
-        listener->sendEvent(MEDIA_TIMED_TEXT, 0, 0, parcel);
-    } else {  // send an empty timed text to clear the screen
-        listener->sendEvent(MEDIA_TIMED_TEXT);
-    }
-}
-
-}  // namespace android
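
mSendSubtitleGeneration above implements the usual stale-message guard for
AHandler-based players: every seek or source change bumps the generation, and
delayed kWhatSendSubtitle / kWhatRetryRead messages carry the generation they
were posted with, so deliveries from before the last seek are dropped. A
minimal framework-free sketch of the pattern (names are illustrative):

    #include <cstdint>

    // Stale-message guard: posted work records the generation it was issued
    // under; any event that invalidates in-flight work bumps the counter.
    struct GenerationGuard {
        int32_t mGeneration = 0;

        int32_t snapshot() const { return mGeneration; }  // attach to a message
        void invalidate() { ++mGeneration; }              // on seek/setDataSource

        // On delivery: process only if nothing invalidated the work meanwhile.
        bool isCurrent(int32_t posted) const { return posted == mGeneration; }
    };
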
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.h b/media/libstagefright/timedtext/TimedTextPlayer.h
deleted file mode 100644
index 9cb49ec..0000000
--- a/media/libstagefright/timedtext/TimedTextPlayer.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TIMEDTEXT_PLAYER_H_
-#define TIMEDTEXT_PLAYER_H_
-
-#include <binder/Parcel.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/foundation/AHandler.h>
-#include <media/stagefright/MediaSource.h>
-#include <utils/RefBase.h>
-
-#include "TimedTextSource.h"
-
-namespace android {
-
-struct AMessage;
-class MediaPlayerBase;
-class TimedTextDriver;
-class TimedTextSource;
-
-class TimedTextPlayer : public AHandler {
-public:
-    TimedTextPlayer(const wp<MediaPlayerBase> &listener);
-
-    virtual ~TimedTextPlayer();
-
-    void start();
-    void pause();
-    void resume();
-    void seekToAsync(int64_t timeUs);
-    void setDataSource(sp<TimedTextSource> source);
-
-protected:
-    virtual void onMessageReceived(const sp<AMessage> &msg);
-
-private:
-    enum {
-        kWhatPause = 'paus',
-        kWhatResume = 'resm',
-        kWhatStart = 'strt',
-        kWhatSeek = 'seek',
-        kWhatRetryRead = 'read',
-        kWhatSendSubtitle = 'send',
-        kWhatSetSource = 'ssrc',
-    };
-
-    // To add a Parcel to an AMessage as an object, it must derive from RefBase.
-    struct ParcelEvent : public RefBase {
-        Parcel parcel;
-    };
-
-    wp<MediaPlayerBase> mListener;
-    sp<TimedTextSource> mSource;
-    int64_t mPendingSeekTimeUs;
-    bool mPaused;
-    int32_t mSendSubtitleGeneration;
-
-    void doSeekAndRead(int64_t seekTimeUs);
-    void doRead(MediaSource::ReadOptions* options = NULL);
-    void onTextEvent();
-    void postTextEvent(const sp<ParcelEvent>& parcel = NULL, int64_t timeUs = -1);
-    int64_t delayUsFromCurrentTime(int64_t fireTimeUs);
-    void notifyError(int error = 0);
-    void notifyListener(const Parcel *parcel = NULL);
-
-    DISALLOW_EVIL_CONSTRUCTORS(TimedTextPlayer);
-};
-
-}  // namespace android
-
-#endif  // TIMEDTEXT_PLAYER_H_
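
ParcelEvent above exists because AMessage::setObject() only carries
sp<RefBase>; boxing a non-refcounted payload in a RefBase subclass is the
standard way to ship it through the looper, as the kWhatSendSubtitle handling
in TimedTextPlayer.cpp shows. A generic sketch of the pattern (Payload and
kWhatDeliver are hypothetical):

    // Box any non-RefBase payload so it can ride in an AMessage.
    struct Boxed : public RefBase {
        Payload payload;
    };

    sp<AMessage> msg = new AMessage(kWhatDeliver, handler);
    msg->setObject("payload", new Boxed());        // accepted as sp<RefBase>
    msg->post();

    // On receipt:
    sp<RefBase> obj;
    CHECK(msg->findObject("payload", &obj));
    Payload &p = static_cast<Boxed *>(obj.get())->payload;
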
diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.cpp b/media/libstagefright/timedtext/TimedTextSRTSource.cpp
deleted file mode 100644
index 2ac1e72..0000000
--- a/media/libstagefright/timedtext/TimedTextSRTSource.cpp
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "TimedTextSRTSource"
-#include <utils/Log.h>
-
-#include <binder/Parcel.h>
-#include <media/stagefright/foundation/ADebug.h>  // for CHECK_xx
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaDefs.h>  // for MEDIA_MIMETYPE_xxx
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-#include "TimedTextSRTSource.h"
-#include "TextDescriptions.h"
-
-namespace android {
-
-TimedTextSRTSource::TimedTextSRTSource(const sp<DataSource>& dataSource)
-        : mSource(dataSource),
-          mMetaData(new MetaData),
-          mIndex(0) {
-    // TODO: Need to detect the language, because SRT doesn't give language
-    // information explicitly.
-    mMetaData->setCString(kKeyMediaLanguage, "und");
-}
-
-TimedTextSRTSource::~TimedTextSRTSource() {
-}
-
-status_t TimedTextSRTSource::start() {
-    status_t err = scanFile();
-    if (err != OK) {
-        reset();
-    }
-    return err;
-}
-
-void TimedTextSRTSource::reset() {
-    mTextVector.clear();
-    mIndex = 0;
-}
-
-status_t TimedTextSRTSource::stop() {
-    reset();
-    return OK;
-}
-
-status_t TimedTextSRTSource::read(
-        int64_t *startTimeUs,
-        int64_t *endTimeUs,
-        Parcel *parcel,
-        const MediaSource::ReadOptions *options) {
-    AString text;
-    status_t err = getText(options, &text, startTimeUs, endTimeUs);
-    if (err != OK) {
-        return err;
-    }
-
-    CHECK_GE(*startTimeUs, 0);
-    extractAndAppendLocalDescriptions(*startTimeUs, text, parcel);
-    return OK;
-}
-
-sp<MetaData> TimedTextSRTSource::getFormat() {
-    return mMetaData;
-}
-
-status_t TimedTextSRTSource::scanFile() {
-    off64_t offset = 0;
-    int64_t startTimeUs;
-    bool endOfFile = false;
-
-    while (!endOfFile) {
-        TextInfo info;
-        status_t err = getNextSubtitleInfo(&offset, &startTimeUs, &info);
-        switch (err) {
-            case OK:
-                mTextVector.add(startTimeUs, info);
-                break;
-            case ERROR_END_OF_STREAM:
-                endOfFile = true;
-                break;
-            default:
-                return err;
-        }
-    }
-    if (mTextVector.isEmpty()) {
-        return ERROR_MALFORMED;
-    }
-    return OK;
-}
-
-/* SRT format:
- *   Subtitle number
- *   Start time --> End time
- *   Text of subtitle (one or more lines)
- *   Blank lines
- *
- * .srt file example:
- * 1
- * 00:00:20,000 --> 00:00:24,400
- * Altocumulus clouds occur between six thousand
- *
- * 2
- * 00:00:24,600 --> 00:00:27,800
- * and twenty thousand feet above ground level.
- */
-status_t TimedTextSRTSource::getNextSubtitleInfo(
-          off64_t *offset, int64_t *startTimeUs, TextInfo *info) {
-    AString data;
-    status_t err;
-
-    // Skip blank lines.
-    do {
-        if ((err = readNextLine(offset, &data)) != OK) {
-            return err;
-        }
-        data.trim();
-    } while (data.empty());
-
-    // Ignore the first non-blank line, which is the subtitle sequence number.
-    if ((err = readNextLine(offset, &data)) != OK) {
-        return err;
-    }
-    int hour1, hour2, min1, min2, sec1, sec2, msec1, msec2;
-    // The timestamp format is hours:minutes:seconds,milliseconds, e.g.:
-    // 00:00:24,600 --> 00:00:27,800
-    if (sscanf(data.c_str(), "%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d",
-               &hour1, &min1, &sec1, &msec1, &hour2, &min2, &sec2, &msec2) != 8) {
-        return ERROR_MALFORMED;
-    }
-
-    *startTimeUs = ((hour1 * 3600 + min1 * 60 + sec1) * 1000 + msec1) * 1000ll;
-    info->endTimeUs = ((hour2 * 3600 + min2 * 60 + sec2) * 1000 + msec2) * 1000ll;
-    if (info->endTimeUs <= *startTimeUs) {
-        return ERROR_MALFORMED;
-    }
-
-    info->offset = *offset;
-    bool needMoreData = true;
-    while (needMoreData) {
-        if ((err = readNextLine(offset, &data)) != OK) {
-            if (err == ERROR_END_OF_STREAM) {
-                break;
-            } else {
-                return err;
-            }
-        }
-
-        data.trim();
-        if (data.empty()) {
-            // An empty line separates two subtitles.
-            needMoreData = false;
-        }
-    }
-    info->textLen = *offset - info->offset;
-    return OK;
-}
-
-status_t TimedTextSRTSource::readNextLine(off64_t *offset, AString *data) {
-    data->clear();
-    while (true) {
-        ssize_t readSize;
-        char character;
-        if ((readSize = mSource->readAt(*offset, &character, 1)) < 1) {
-            if (readSize == 0) {
-                return ERROR_END_OF_STREAM;
-            }
-            return ERROR_IO;
-        }
-
-        (*offset)++;
-
-        // a line could end with CR, LF or CR + LF
-        if (character == 10) {
-            break;
-        } else if (character == 13) {
-            if ((readSize = mSource->readAt(*offset, &character, 1)) < 1) {
-                if (readSize == 0) {  // end of the stream
-                    return OK;
-                }
-                return ERROR_IO;
-            }
-
-            (*offset)++;
-            if (character != 10) {
-                (*offset)--;
-            }
-            break;
-        }
-        data->append(character);
-    }
-    return OK;
-}
-
-status_t TimedTextSRTSource::getText(
-        const MediaSource::ReadOptions *options,
-        AString *text, int64_t *startTimeUs, int64_t *endTimeUs) {
-    if (mTextVector.size() == 0) {
-        return ERROR_END_OF_STREAM;
-    }
-    text->clear();
-    int64_t seekTimeUs;
-    MediaSource::ReadOptions::SeekMode mode;
-    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
-        int64_t lastEndTimeUs =
-                mTextVector.valueAt(mTextVector.size() - 1).endTimeUs;
-        if (seekTimeUs < 0) {
-            return ERROR_OUT_OF_RANGE;
-        } else if (seekTimeUs >= lastEndTimeUs) {
-            return ERROR_END_OF_STREAM;
-        } else {
-            // binary search
-            size_t low = 0;
-            size_t high = mTextVector.size() - 1;
-            size_t mid = 0;
-
-            while (low <= high) {
-                mid = low + (high - low)/2;
-                int diff = compareExtendedRangeAndTime(mid, seekTimeUs);
-                if (diff == 0) {
-                    break;
-                } else if (diff < 0) {
-                    low = mid + 1;
-                } else {
-                    high = mid - 1;
-                }
-            }
-            mIndex = mid;
-        }
-    }
-
-    if (mIndex >= mTextVector.size()) {
-        return ERROR_END_OF_STREAM;
-    }
-
-    const TextInfo &info = mTextVector.valueAt(mIndex);
-    *startTimeUs = mTextVector.keyAt(mIndex);
-    *endTimeUs = info.endTimeUs;
-    mIndex++;
-
-    char *str = new char[info.textLen];
-    if (mSource->readAt(info.offset, str, info.textLen) < info.textLen) {
-        delete[] str;
-        return ERROR_IO;
-    }
-    text->append(str, info.textLen);
-    delete[] str;
-    return OK;
-}
-
-status_t TimedTextSRTSource::extractAndAppendLocalDescriptions(
-        int64_t timeUs, const AString &text, Parcel *parcel) {
-    const void *data = text.c_str();
-    size_t size = text.size();
-    int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS |
-                   TextDescriptions::OUT_OF_BAND_TEXT_SRT;
-
-    if (size > 0) {
-        return TextDescriptions::getParcelOfDescriptions(
-                (const uint8_t *)data, size, flag, timeUs / 1000, parcel);
-    }
-    return OK;
-}
-
-int TimedTextSRTSource::compareExtendedRangeAndTime(size_t index, int64_t timeUs) {
-    CHECK_LT(index, mTextVector.size());
-    int64_t endTimeUs = mTextVector.valueAt(index).endTimeUs;
-    int64_t startTimeUs = (index > 0) ?
-            mTextVector.valueAt(index - 1).endTimeUs : 0;
-    if (timeUs >= startTimeUs && timeUs < endTimeUs) {
-        return 0;
-    } else if (endTimeUs <= timeUs) {
-        return -1;
-    } else {
-        return 1;
-    }
-}
-
-}  // namespace android
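
The timestamp math in getNextSubtitleInfo above converts the eight sscanf'd
fields to microseconds. The same conversion as a standalone helper (a sketch;
parseSrtTimeUs is not part of the patch):

    #include <cstdint>
    #include <cstdio>

    // Parses "HH:MM:SS,mmm" into microseconds; returns -1 on malformed input.
    int64_t parseSrtTimeUs(const char *s) {
        int h, m, sec, ms;
        if (sscanf(s, "%02d:%02d:%02d,%03d", &h, &m, &sec, &ms) != 4) {
            return -1;
        }
        return ((h * 3600LL + m * 60 + sec) * 1000 + ms) * 1000;
    }

    // parseSrtTimeUs("00:00:24,600") == 24600000
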
diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.h b/media/libstagefright/timedtext/TimedTextSRTSource.h
deleted file mode 100644
index 232675e..0000000
--- a/media/libstagefright/timedtext/TimedTextSRTSource.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TIMED_TEXT_SRT_SOURCE_H_
-#define TIMED_TEXT_SRT_SOURCE_H_
-
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <utils/Compat.h>  // off64_t
-
-#include "TimedTextSource.h"
-
-namespace android {
-
-struct AString;
-class DataSource;
-class MediaBuffer;
-class Parcel;
-
-class TimedTextSRTSource : public TimedTextSource {
-public:
-    TimedTextSRTSource(const sp<DataSource>& dataSource);
-    virtual status_t start();
-    virtual status_t stop();
-    virtual status_t read(
-            int64_t *startTimeUs,
-            int64_t *endTimeUs,
-            Parcel *parcel,
-            const MediaSource::ReadOptions *options = NULL);
-    virtual sp<MetaData> getFormat();
-
-protected:
-    virtual ~TimedTextSRTSource();
-
-private:
-    sp<DataSource> mSource;
-    sp<MetaData> mMetaData;
-
-    struct TextInfo {
-        int64_t endTimeUs;
-        // The offset of the text in the original file.
-        off64_t offset;
-        int textLen;
-    };
-
-    size_t mIndex;
-    KeyedVector<int64_t, TextInfo> mTextVector;
-
-    void reset();
-    status_t scanFile();
-    status_t getNextSubtitleInfo(
-            off64_t *offset, int64_t *startTimeUs, TextInfo *info);
-    status_t readNextLine(off64_t *offset, AString *data);
-    status_t getText(
-            const MediaSource::ReadOptions *options,
-            AString *text, int64_t *startTimeUs, int64_t *endTimeUs);
-    status_t extractAndAppendLocalDescriptions(
-            int64_t timeUs, const AString &text, Parcel *parcel);
-
-    // Compares the time range of the subtitle at index to the given timeUs.
-    // For matching, the subtitle's time range is extended to
-    // [endTimeUs of the previous subtitle, endTimeUs of the current subtitle).
-    //
-    // This compare function is used to find a next subtitle when read() is
-    // called with seek options. Note that timeUs within gap ranges, such as
-    // [200, 300) in the example below, will be matched to the closest future
-    // subtitle, [300, 400).
-    //
-    // For instance, assuming there are 3 subtitles in mTextVector,
-    // 0: [100, 200)      ----> [0, 200)
-    // 1: [300, 400)      ----> [200, 400)
-    // 2: [500, 600)      ----> [400, 600)
-    // If 'index' is 1, this function
-    // returns 0 if timeUs is in [200, 400),
-    // returns -1 if timeUs >= 400, and
-    // returns 1 if timeUs < 200.
-    int compareExtendedRangeAndTime(size_t index, int64_t timeUs);
-
-    DISALLOW_EVIL_CONSTRUCTORS(TimedTextSRTSource);
-};
-
-}  // namespace android
-
-#endif  // TIMED_TEXT_SRT_SOURCE_H_
diff --git a/media/libstagefright/timedtext/TimedTextSource.cpp b/media/libstagefright/timedtext/TimedTextSource.cpp
deleted file mode 100644
index 953f7b5..0000000
--- a/media/libstagefright/timedtext/TimedTextSource.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "TimedTextSource"
-#include <utils/Log.h>
-
-#include <media/stagefright/foundation/ADebug.h>  // CHECK_XX macro
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaDefs.h>  // for MEDIA_MIMETYPE_xxx
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-#include "TimedTextSource.h"
-
-#include "TimedText3GPPSource.h"
-#include "TimedTextSRTSource.h"
-
-namespace android {
-
-// static
-sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
-        const sp<MediaSource>& mediaSource) {
-    const char *mime;
-    CHECK(mediaSource->getFormat()->findCString(kKeyMIMEType, &mime));
-    if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0) {
-        return new TimedText3GPPSource(mediaSource);
-    }
-    ALOGE("Unsupported mime type for subtitle. : %s", mime);
-    return NULL;
-}
-
-// static
-sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
-        const sp<DataSource>& dataSource, FileType filetype) {
-    switch(filetype) {
-        case OUT_OF_BAND_FILE_SRT:
-            return new TimedTextSRTSource(dataSource);
-        case OUT_OF_BAND_FILE_SMI:
-            // TODO: Implement for SMI.
-            ALOGE("Supporting SMI is not implemented yet");
-            break;
-        default:
-            ALOGE("Undefined subtitle format. : %d", filetype);
-    }
-    return NULL;
-}
-
-sp<MetaData> TimedTextSource::getFormat() {
-    return NULL;
-}
-
-}  // namespace android
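
For reference, the out-of-band path through the deleted factory looked like
this from a caller's perspective (sketch composed from the removed code,
assuming an already-open .srt file descriptor):

    sp<DataSource> ds = new FileSource(dup(fd), 0, length);
    sp<TimedTextSource> src = TimedTextSource::CreateTimedTextSource(
            ds, TimedTextSource::OUT_OF_BAND_FILE_SRT);
    if (src != NULL && src->start() == OK) {
        int64_t startUs, endUs;
        Parcel parcel;
        status_t err = src->read(&startUs, &endUs, &parcel);  // first cue
    }
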
diff --git a/media/libstagefright/timedtext/TimedTextSource.h b/media/libstagefright/timedtext/TimedTextSource.h
deleted file mode 100644
index 8c1c1cd..0000000
--- a/media/libstagefright/timedtext/TimedTextSource.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TIMED_TEXT_SOURCE_H_
-#define TIMED_TEXT_SOURCE_H_
-
-#include <media/stagefright/foundation/ABase.h>  // for DISALLOW_XXX macro.
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>  // for MediaSource::ReadOptions
-#include <utils/RefBase.h>
-
-namespace android {
-
-class DataSource;
-class MetaData;
-class Parcel;
-
-class TimedTextSource : public RefBase {
- public:
-  enum FileType {
-      OUT_OF_BAND_FILE_SRT = 1,
-      OUT_OF_BAND_FILE_SMI = 2,
-  };
-  static sp<TimedTextSource> CreateTimedTextSource(
-      const sp<MediaSource>& source);
-  static sp<TimedTextSource> CreateTimedTextSource(
-      const sp<DataSource>& source, FileType filetype);
-  TimedTextSource() {}
-  virtual status_t start() = 0;
-  virtual status_t stop() = 0;
-  // Returns the subtitle parcel and its start/end times.
-  virtual status_t read(
-          int64_t *startTimeUs,
-          int64_t *endTimeUs,
-          Parcel *parcel,
-          const MediaSource::ReadOptions *options = NULL) = 0;
-  virtual status_t extractGlobalDescriptions(Parcel * /* parcel */) {
-      return INVALID_OPERATION;
-  }
-  virtual sp<MetaData> getFormat();
-
- protected:
-  virtual ~TimedTextSource() { }
-
- private:
-  DISALLOW_EVIL_CONSTRUCTORS(TimedTextSource);
-};
-
-}  // namespace android
-
-#endif  // TIMED_TEXT_SOURCE_H_
diff --git a/media/libstagefright/timedtext/test/Android.mk b/media/libstagefright/timedtext/test/Android.mk
deleted file mode 100644
index e0e0e0d..0000000
--- a/media/libstagefright/timedtext/test/Android.mk
+++ /dev/null
@@ -1,32 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-# ================================================================
-# Unit tests for libstagefright_timedtext
-# ================================================================
-
-# ================================================================
-# A test for TimedTextSRTSource
-# ================================================================
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := TimedTextSRTSource_test
-
-LOCAL_MODULE_TAGS := eng tests
-
-LOCAL_SRC_FILES := TimedTextSRTSource_test.cpp
-
-LOCAL_C_INCLUDES := \
-    $(TOP)/external/expat/lib \
-    $(TOP)/frameworks/av/media/libstagefright/timedtext
-
-LOCAL_SHARED_LIBRARIES := \
-    libbinder \
-    libexpat \
-    libstagefright \
-    libstagefright_foundation \
-    libutils
-
-LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
-
-include $(BUILD_NATIVE_TEST)
diff --git a/media/libstagefright/timedtext/test/TimedTextSRTSource_test.cpp b/media/libstagefright/timedtext/test/TimedTextSRTSource_test.cpp
deleted file mode 100644
index 211e732..0000000
--- a/media/libstagefright/timedtext/test/TimedTextSRTSource_test.cpp
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "TimedTextSRTSource_test"
-#include <utils/Log.h>
-
-#include <gtest/gtest.h>
-
-#include <binder/Parcel.h>
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaErrors.h>
-#include <utils/misc.h>
-
-#include <TimedTextSource.h>
-#include <TimedTextSRTSource.h>
-
-namespace android {
-namespace test {
-
-static const int kSecToUsec = 1000000;
-static const int kSecToMsec = 1000;
-static const int kMsecToUsec = 1000;
-
-/* SRT format (http://en.wikipedia.org/wiki/SubRip)
- *   Subtitle number
- *   Start time --> End time
- *   Text of subtitle (one or more lines)
- *   Blank lines
- */
-static const char *kSRTString =
-    "1\n00:00:1,000 --> 00:00:1,500\n1\n\n"
-    "2\n00:00:2,000 --> 00:00:2,500\n2\n\n"
-    "3\n00:00:3,000 --> 00:00:3,500\n3\n\n"
-    "4\n00:00:4,000 --> 00:00:4,500\n4\n\n"
-    "5\n00:00:5,000 --> 00:00:5,500\n5\n\n"
-    // edge case: previous end time = next start time
-    "6\n00:00:5,500 --> 00:00:5,800\n6\n\n"
-    "7\n00:00:5,800 --> 00:00:6,000\n7\n\n"
-    "8\n00:00:6,000 --> 00:00:7,000\n8\n\n";
-
-class SRTDataSourceStub : public DataSource {
-public:
-    SRTDataSourceStub(const char *data, size_t size) :
-        mData(data), mSize(size) {}
-    virtual ~SRTDataSourceStub() {}
-
-    virtual status_t initCheck() const {
-        return OK;
-    }
-
-    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
-        if ((size_t)offset >= mSize) return 0;
-
-        ssize_t avail = mSize - offset;
-        if ((size_t)avail > size) {
-            avail = size;
-        }
-        memcpy(data, mData + offset, avail);
-        return avail;
-    }
-
-private:
-    const char *mData;
-    size_t mSize;
-};
-
-class TimedTextSRTSourceTest : public testing::Test {
-protected:
-    void SetUp() {
-        sp<DataSource> stub = new SRTDataSourceStub(
-                kSRTString,
-                strlen(kSRTString));
-        mSource = new TimedTextSRTSource(stub);
-        mSource->start();
-    }
-
-    void CheckStartTimeMs(const Parcel& parcel, int32_t timeMs) {
-        int32_t intval;
-        parcel.setDataPosition(8);
-        parcel.readInt32(&intval);
-        EXPECT_EQ(timeMs, intval);
-    }
-
-    void CheckDataEquals(const Parcel& parcel, const char* content) {
-        int32_t intval;
-        parcel.setDataPosition(16);
-        parcel.readInt32(&intval);
-        parcel.setDataPosition(24);
-        const char* data = (const char*) parcel.readInplace(intval);
-
-        int32_t content_len = strlen(content);
-        EXPECT_EQ(content_len, intval);
-        EXPECT_TRUE(strncmp(data, content, content_len) == 0);
-    }
-
-    sp<TimedTextSource> mSource;
-    int64_t startTimeUs;
-    int64_t endTimeUs;
-    Parcel parcel;
-    AString subtitle;
-    status_t err;
-};
-
-TEST_F(TimedTextSRTSourceTest, readAll) {
-    for (int i = 1; i <= 5; i++) {
-        err = mSource->read(&startTimeUs, &endTimeUs, &parcel);
-        EXPECT_EQ(OK, err);
-        CheckStartTimeMs(parcel, i * kSecToMsec);
-        subtitle = AStringPrintf("%d\n\n", i);
-        CheckDataEquals(parcel, subtitle.c_str());
-    }
-    // read edge cases
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel);
-    EXPECT_EQ(OK, err);
-    CheckStartTimeMs(parcel, 5500);
-    subtitle = AStringPrintf("6\n\n");
-    CheckDataEquals(parcel, subtitle.c_str());
-
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel);
-    EXPECT_EQ(OK, err);
-    CheckStartTimeMs(parcel, 5800);
-    subtitle = AStringPrintf("7\n\n");
-    CheckDataEquals(parcel, subtitle.c_str());
-
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel);
-    EXPECT_EQ(OK, err);
-    CheckStartTimeMs(parcel, 6000);
-    subtitle = AStringPrintf("8\n\n");
-    CheckDataEquals(parcel, subtitle.c_str());
-
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel);
-    EXPECT_EQ(ERROR_END_OF_STREAM, err);
-}
-
-TEST_F(TimedTextSRTSourceTest, seekTimeIsEarlierThanFirst) {
-    MediaSource::ReadOptions options;
-    options.setSeekTo(500, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-    EXPECT_EQ(OK, err);
-    EXPECT_EQ(1 * kSecToUsec, startTimeUs);
-    CheckStartTimeMs(parcel, 1 * kSecToMsec);
-}
-
-TEST_F(TimedTextSRTSourceTest, seekTimeIsLaterThanLast) {
-    MediaSource::ReadOptions options;
-    options.setSeekTo(7 * kSecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-    EXPECT_EQ(ERROR_END_OF_STREAM, err);
-
-    options.setSeekTo(8 * kSecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-    EXPECT_EQ(ERROR_END_OF_STREAM, err);
-}
-
-TEST_F(TimedTextSRTSourceTest, seekTimeIsMatched) {
-    for (int i = 1; i <= 5; i++) {
-        MediaSource::ReadOptions options;
-        options.setSeekTo(i * kSecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-        err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-        EXPECT_EQ(OK, err);
-        EXPECT_EQ(i * kSecToUsec, startTimeUs);
-
-        options.setSeekTo(i * kSecToUsec + 100, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-        err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-        EXPECT_EQ(OK, err);
-        EXPECT_EQ(i * kSecToUsec, startTimeUs);
-    }
-}
-
-TEST_F(TimedTextSRTSourceTest, seekTimeInBetweenTwo) {
-    for (int i = 1; i <= 4; i++) {
-        MediaSource::ReadOptions options;
-        options.setSeekTo(i * kSecToUsec + 500000, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-        err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-        EXPECT_EQ(OK, err);
-        EXPECT_EQ((i + 1) * kSecToUsec, startTimeUs);
-
-        options.setSeekTo(i * kSecToUsec + 600000, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-        err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-        EXPECT_EQ(OK, err);
-        EXPECT_EQ((i + 1) * kSecToUsec, startTimeUs);
-    }
-}
-
-TEST_F(TimedTextSRTSourceTest, checkEdgeCase) {
-    MediaSource::ReadOptions options;
-    options.setSeekTo(5500 * kMsecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-    EXPECT_EQ(OK, err);
-    EXPECT_EQ(5500 * kMsecToUsec, startTimeUs);
-    subtitle = AStringPrintf("6\n\n");
-    CheckDataEquals(parcel, subtitle.c_str());
-
-    options.setSeekTo(5800 * kMsecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-    EXPECT_EQ(OK, err);
-    EXPECT_EQ(5800 * kMsecToUsec, startTimeUs);
-    subtitle = AStringPrintf("7\n\n");
-    CheckDataEquals(parcel, subtitle.c_str());
-
-    options.setSeekTo(6000 * kMsecToUsec, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-    err = mSource->read(&startTimeUs, &endTimeUs, &parcel, &options);
-    EXPECT_EQ(OK, err);
-    EXPECT_EQ(6000 * kMsecToUsec, startTimeUs);
-    subtitle = AStringPrintf("8\n\n");
-    CheckDataEquals(parcel, subtitle.c_str());
-}
-
-}  // namespace test
-}  // namespace android
diff --git a/media/libstagefright/webm/Android.mk b/media/libstagefright/webm/Android.mk
index bc53c56..ce580ae 100644
--- a/media/libstagefright/webm/Android.mk
+++ b/media/libstagefright/webm/Android.mk
@@ -5,6 +5,7 @@
 
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow
 
 LOCAL_SRC_FILES:= EbmlUtil.cpp        \
                   WebmElement.cpp     \
diff --git a/media/libstagefright/webm/EbmlUtil.cpp b/media/libstagefright/webm/EbmlUtil.cpp
index 449fec6..c519f5c 100644
--- a/media/libstagefright/webm/EbmlUtil.cpp
+++ b/media/libstagefright/webm/EbmlUtil.cpp
@@ -29,7 +29,9 @@
 };
 
 int numberOfTrailingZeros32(int32_t i) {
-    uint32_t u = (i & -i) * 0x0450FBAF;
+    int64_t i64 = i;
+    i64 = (i64 & -i64) * 0x0450FBAF;
+    uint32_t u = i64;
     return NTZ_TABLE[(u) >> 26];
 }
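
The widening matters under the integer-overflow sanitizers enabled in
Android.mk above: for i == INT32_MIN, negating i in 32-bit signed arithmetic
overflows, and the multiply by the de Bruijn-style constant can overflow a
signed 32-bit intermediate as well; doing both steps in int64_t keeps the
computation defined. A worked example of the lookup (NTZ_TABLE entries are not
shown in this hunk; the final value follows from the construction):

    // i = 8 (binary 1000):
    //   i & -i           = 8           isolates the lowest set bit
    //   8 * 0x0450FBAF   = 0x2287DD78  each power of two yields unique top bits
    //   0x2287DD78 >> 26 = 8           6-bit index into the 64-entry table
    //   NTZ_TABLE[8]     = 3           the number of trailing zeros of 8
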
 
diff --git a/media/libstagefright/webm/WebmConstants.h b/media/libstagefright/webm/WebmConstants.h
index c53f458..3111559 100644
--- a/media/libstagefright/webm/WebmConstants.h
+++ b/media/libstagefright/webm/WebmConstants.h
@@ -98,6 +98,24 @@
     kMkvDisplayHeight = 0x54BA,
     kMkvDisplayUnit = 0x54B2,
     kMkvAspectRatioType = 0x54B3,
+    kMkvColour = 0x55B0,
+    kMkvMatrixCoefficients = 0x55B1,
+    kMkvRange = 0x55B9,
+    kMkvTransferCharacteristics = 0x55BA,
+    kMkvPrimaries = 0x55BB,
+    kMkvMaxCLL = 0x55BC,
+    kMkvMaxFALL = 0x55BD,
+    kMkvMasteringMetadata = 0x55D0,
+    kMkvPrimaryRChromaticityX = 0x55D1,
+    kMkvPrimaryRChromaticityY = 0x55D2,
+    kMkvPrimaryGChromaticityX = 0x55D3,
+    kMkvPrimaryGChromaticityY = 0x55D4,
+    kMkvPrimaryBChromaticityX = 0x55D5,
+    kMkvPrimaryBChromaticityY = 0x55D6,
+    kMkvWhitePointChromaticityX = 0x55D7,
+    kMkvWhitePointChromaticityY = 0x55D8,
+    kMkvLuminanceMax = 0x55D9,
+    kMkvLuminanceMin = 0x55DA,
     kMkvFrameRate = 0x2383E3,
     kMkvAudio = 0xE1,
     kMkvSamplingFrequency = 0xB5,
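
These IDs correspond to the Matroska Colour master element and its
MasteringMetadata child; the VideoTrackEntry change below nests them as:

    // TrackEntry
    //   Video
    //     Colour                     (0x55B0)
    //       MatrixCoefficients, Range, TransferCharacteristics, Primaries
    //       MaxCLL, MaxFALL
    //       MasteringMetadata        (0x55D0)
    //         Primary{R,G,B}Chromaticity{X,Y}
    //         WhitePointChromaticity{X,Y}
    //         LuminanceMax, LuminanceMin
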
diff --git a/media/libstagefright/webm/WebmElement.cpp b/media/libstagefright/webm/WebmElement.cpp
index a008cab..a5120b9 100644
--- a/media/libstagefright/webm/WebmElement.cpp
+++ b/media/libstagefright/webm/WebmElement.cpp
@@ -22,6 +22,8 @@
 #include "WebmConstants.h"
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/MetaData.h>
 #include <utils/Log.h>
 
 #include <string.h>
@@ -338,8 +340,10 @@
 }
 
 sp<WebmElement> WebmElement::VideoTrackEntry(
+        const char *codec,
         uint64_t width,
         uint64_t height,
+        const sp<MetaData> &meta,
         uint64_t uid,
         bool lacing,
         const char *lang) {
@@ -353,14 +357,116 @@
             uid,
             lacing,
             lang,
-            "V_VP8",
+            codec,
             kVideoType,
             trackEntryFields);
 
+    // CSD
+    uint32_t type;
+    const void *data;
+    size_t size;
+    if (meta->findData(kKeyVp9CodecPrivate, &type, &data, &size)) {
+        sp<ABuffer> buf = new ABuffer((void *)data, size); // note: buf does not own data
+        trackEntryFields.push_back(new WebmBinary(kMkvCodecPrivate, buf));
+    }
+
     List<sp<WebmElement> > videoInfo;
     videoInfo.push_back(new WebmUnsigned(kMkvPixelWidth, width));
     videoInfo.push_back(new WebmUnsigned(kMkvPixelHeight, height));
 
+    // Color aspects
+    {
+        List<sp<WebmElement> > colorInfo;
+
+        ColorAspects aspects;
+        aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+        aspects.mTransfer = ColorAspects::TransferUnspecified;
+        aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+        aspects.mRange = ColorAspects::RangeUnspecified;
+        bool havePrimaries = meta->findInt32(kKeyColorPrimaries, (int32_t*)&aspects.mPrimaries);
+        bool haveTransfer = meta->findInt32(kKeyTransferFunction, (int32_t*)&aspects.mTransfer);
+        bool haveCoeffs = meta->findInt32(kKeyColorMatrix, (int32_t*)&aspects.mMatrixCoeffs);
+        bool haveRange = meta->findInt32(kKeyColorRange, (int32_t*)&aspects.mRange);
+
+        int32_t primaries, transfer, coeffs;
+        bool fullRange;
+        ColorUtils::convertCodecColorAspectsToIsoAspects(
+                aspects, &primaries, &transfer, &coeffs, &fullRange);
+        if (havePrimaries) {
+            colorInfo.push_back(new WebmUnsigned(kMkvPrimaries, primaries));
+        }
+        if (haveTransfer) {
+            colorInfo.push_back(new WebmUnsigned(kMkvTransferCharacteristics, transfer));
+        }
+        if (haveCoeffs) {
+            colorInfo.push_back(new WebmUnsigned(kMkvMatrixCoefficients, coeffs));
+        }
+        if (haveRange) {
+            colorInfo.push_back(new WebmUnsigned(kMkvRange, fullRange ? 2 : 1));
+        }
+
+        // Also add HDR static info, some of which goes into the MasteringMetadata element.
+
+        const HDRStaticInfo *info;
+        uint32_t type;
+        const void *data;
+        size_t size;
+        if (meta->findData(kKeyHdrStaticInfo, &type, &data, &size)
+                && type == 'hdrS' && size == sizeof(*info)) {
+            info = (const HDRStaticInfo*)data;
+            if (info->mID == HDRStaticInfo::kType1) {
+                List<sp<WebmElement> > masteringInfo;
+
+                // Convert HDRStaticInfo values to their Matroska equivalents for each non-zero group.
+                if (info->sType1.mMaxFrameAverageLightLevel) {
+                    colorInfo.push_back(new WebmUnsigned(
+                            kMkvMaxFALL, info->sType1.mMaxFrameAverageLightLevel));
+                }
+                if (info->sType1.mMaxContentLightLevel) {
+                    colorInfo.push_back(new WebmUnsigned(
+                            kMkvMaxCLL, info->sType1.mMaxContentLightLevel));
+                }
+                if (info->sType1.mMinDisplayLuminance) {
+                    // HDRStaticInfo Type1 stores min luminance scaled 10000:1
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvLuminanceMin, info->sType1.mMinDisplayLuminance * 0.0001));
+                }
+                if (info->sType1.mMaxDisplayLuminance) {
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvLuminanceMax, (float)info->sType1.mMaxDisplayLuminance));
+                }
+                // HDRStaticInfo Type1 stores primaries scaled 50000:1
+                if (info->sType1.mW.x || info->sType1.mW.y) {
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvWhitePointChromaticityX, info->sType1.mW.x * 0.00002));
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvWhitePointChromaticityY, info->sType1.mW.y * 0.00002));
+                }
+                if (info->sType1.mR.x || info->sType1.mR.y || info->sType1.mG.x
+                        || info->sType1.mG.y || info->sType1.mB.x || info->sType1.mB.y) {
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvPrimaryRChromaticityX, info->sType1.mR.x * 0.00002));
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvPrimaryRChromaticityY, info->sType1.mR.y * 0.00002));
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvPrimaryGChromaticityX, info->sType1.mG.x * 0.00002));
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvPrimaryGChromaticityY, info->sType1.mG.y * 0.00002));
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvPrimaryBChromaticityX, info->sType1.mB.x * 0.00002));
+                    masteringInfo.push_back(new WebmFloat(
+                            kMkvPrimaryBChromaticityY, info->sType1.mB.y * 0.00002));
+                }
+                if (masteringInfo.size()) {
+                    colorInfo.push_back(new WebmMaster(kMkvMasteringMetadata, masteringInfo));
+                }
+            }
+        }
+        if (colorInfo.size()) {
+            videoInfo.push_back(new WebmMaster(kMkvColour, colorInfo));
+        }
+    }
+
     trackEntryFields.push_back(new WebmMaster(kMkvVideo, videoInfo));
     return new WebmMaster(kMkvTrackEntry, trackEntryFields);
 }
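
The scale factors above invert the HDRStaticInfo Type1 fixed-point encodings:
chromaticities are stored as coordinate * 50000 (hence * 0.00002 to recover
the CIE value) and min display luminance as cd/m^2 * 10000 (hence * 0.0001).
Worked example with hypothetical D65-like values:

    // mW.x = 15635  ->  15635 * 0.00002 = 0.3127
    // mW.y = 16450  ->  16450 * 0.00002 = 0.3290
    // mMinDisplayLuminance = 500  ->  500 * 0.0001 = 0.05 cd/m^2
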
diff --git a/media/libstagefright/webm/WebmElement.h b/media/libstagefright/webm/WebmElement.h
index f19933e..4e90793 100644
--- a/media/libstagefright/webm/WebmElement.h
+++ b/media/libstagefright/webm/WebmElement.h
@@ -24,6 +24,8 @@
 
 namespace android {
 
+class MetaData;
+
 struct WebmElement : public LightRefBase<WebmElement> {
     const uint64_t mId, mSize;
 
@@ -57,8 +59,10 @@
             const char *lang = "und");
 
     static sp<WebmElement> VideoTrackEntry(
+            const char *codec,
             uint64_t width,
             uint64_t height,
+            const sp<MetaData> &md,
             uint64_t uid = 0,
             bool lacing = false,
             const char *lang = "und");
diff --git a/media/libstagefright/webm/WebmFrameThread.cpp b/media/libstagefright/webm/WebmFrameThread.cpp
index a4b8a42..7eb4745 100644
--- a/media/libstagefright/webm/WebmFrameThread.cpp
+++ b/media/libstagefright/webm/WebmFrameThread.cpp
@@ -246,7 +246,7 @@
 }
 
 WebmFrameMediaSourceThread::WebmFrameMediaSourceThread(
-        const sp<MediaSource>& source,
+        const sp<IMediaSource>& source,
         int type,
         LinkedBlockingQueue<const sp<WebmFrame> >& sink,
         uint64_t timeCodeScale,
diff --git a/media/libstagefright/webm/WebmFrameThread.h b/media/libstagefright/webm/WebmFrameThread.h
index d65d9b7..528984f 100644
--- a/media/libstagefright/webm/WebmFrameThread.h
+++ b/media/libstagefright/webm/WebmFrameThread.h
@@ -123,7 +123,7 @@
 class WebmFrameMediaSourceThread: public WebmFrameSourceThread {
 public:
     WebmFrameMediaSourceThread(
-            const sp<MediaSource>& source,
+            const sp<IMediaSource>& source,
             int type,
             LinkedBlockingQueue<const sp<WebmFrame> >& sink,
             uint64_t timeCodeScale,
@@ -142,7 +142,7 @@
     }
 
 private:
-    const sp<MediaSource> mSource;
+    const sp<IMediaSource> mSource;
     const uint64_t mTimeCodeScale;
     uint64_t mStartTimeUs;
 
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index 737f144..e58964d 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -83,9 +83,30 @@
 // static
 sp<WebmElement> WebmWriter::videoTrack(const sp<MetaData>& md) {
     int32_t width, height;
-    CHECK(md->findInt32(kKeyWidth, &width));
-    CHECK(md->findInt32(kKeyHeight, &height));
-    return WebmElement::VideoTrackEntry(width, height);
+    const char *mimeType;
+    if (!md->findInt32(kKeyWidth, &width)
+            || !md->findInt32(kKeyHeight, &height)
+            || !md->findCString(kKeyMIMEType, &mimeType)) {
+        ALOGE("Missing format keys for video track");
+        md->dumpToLog();
+        return NULL;
+    }
+    const char *codec;
+    if (!strncasecmp(
+            mimeType,
+            MEDIA_MIMETYPE_VIDEO_VP8,
+            strlen(MEDIA_MIMETYPE_VIDEO_VP8))) {
+        codec = "V_VP8";
+    } else if (!strncasecmp(
+            mimeType,
+            MEDIA_MIMETYPE_VIDEO_VP9,
+            strlen(MEDIA_MIMETYPE_VIDEO_VP9))) {
+        codec = "V_VP9";
+    } else {
+        ALOGE("Unsupported codec: %s", mimeType);
+        return NULL;
+    }
+    return WebmElement::VideoTrackEntry(codec, width, height, md);
 }
 
 // static
@@ -98,10 +119,14 @@
     const void *headerData3;
     size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;
 
-    CHECK(md->findInt32(kKeyChannelCount, &nChannels));
-    CHECK(md->findInt32(kKeySampleRate, &samplerate));
-    CHECK(md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1));
-    CHECK(md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3));
+    if (!md->findInt32(kKeyChannelCount, &nChannels)
+            || !md->findInt32(kKeySampleRate, &samplerate)
+            || !md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1)
+            || !md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3)) {
+        ALOGE("Missing format keys for audio track");
+        md->dumpToLog();
+        return NULL;
+    }
 
     size_t codecPrivateSize = 1;
     codecPrivateSize += XiphLaceCodeLen(headerSize1);
@@ -227,6 +252,11 @@
     mFd = -1;
     mInitCheck = NO_INIT;
     mStarted = false;
+    for (size_t ix = 0; ix < kMaxStreams; ++ix) {
+        mStreams[ix].mTrackEntry.clear();
+        mStreams[ix].mSource.clear();
+    }
+    mStreamsInOrder.clear();
 }
 
 status_t WebmWriter::reset() {
@@ -259,6 +289,8 @@
         if (durationUs < minDurationUs) {
             minDurationUs = durationUs;
         }
+
+        mStreams[i].mThread.clear();
     }
 
     if (numTracks() > 1) {
@@ -328,7 +360,7 @@
     return err;
 }
 
-status_t WebmWriter::addSource(const sp<MediaSource> &source) {
+status_t WebmWriter::addSource(const sp<IMediaSource> &source) {
     Mutex::Autolock l(mLock);
     if (mStarted) {
         ALOGE("Attempt to add source AFTER recording is started");
@@ -348,15 +380,18 @@
     const char *mime;
     source->getFormat()->findCString(kKeyMIMEType, &mime);
     const char *vp8 = MEDIA_MIMETYPE_VIDEO_VP8;
+    const char *vp9 = MEDIA_MIMETYPE_VIDEO_VP9;
     const char *vorbis = MEDIA_MIMETYPE_AUDIO_VORBIS;
 
     size_t streamIndex;
-    if (!strncasecmp(mime, vp8, strlen(vp8))) {
+    if (!strncasecmp(mime, vp8, strlen(vp8)) ||
+        !strncasecmp(mime, vp9, strlen(vp9))) {
         streamIndex = kVideoIndex;
     } else if (!strncasecmp(mime, vorbis, strlen(vorbis))) {
         streamIndex = kAudioIndex;
     } else {
-        ALOGE("Track (%s) other than %s or %s is not supported", mime, vp8, vorbis);
+        ALOGE("Track (%s) other than %s, %s or %s is not supported",
+              mime, vp8, vp9, vorbis);
         return ERROR_UNSUPPORTED;
     }
 
@@ -370,6 +405,11 @@
     // Go ahead to add the track.
     mStreams[streamIndex].mSource = source;
     mStreams[streamIndex].mTrackEntry = mStreams[streamIndex].mMakeTrack(source->getFormat());
+    if (mStreams[streamIndex].mTrackEntry == NULL) {
+        mStreams[streamIndex].mSource.clear();
+        return BAD_VALUE;
+    }
+    mStreamsInOrder.push_back(mStreams[streamIndex].mTrackEntry);
 
     return OK;
 }
@@ -410,7 +450,10 @@
             mTimeCodeScale = tcsl;
         }
     }
-    CHECK_GT(mTimeCodeScale, 0);
+    if (mTimeCodeScale == 0) {
+        ALOGE("movie time scale is 0");
+        return BAD_VALUE;
+    }
     ALOGV("movie time scale: %" PRIu64, mTimeCodeScale);
 
     /*
@@ -432,10 +475,8 @@
     info = WebmElement::SegmentInfo(mTimeCodeScale, 0);
 
     List<sp<WebmElement> > children;
-    for (size_t i = 0; i < kMaxStreams; ++i) {
-        if (mStreams[i].mTrackEntry != NULL) {
-            children.push_back(mStreams[i].mTrackEntry);
-        }
+    for (size_t i = 0; i < mStreamsInOrder.size(); ++i) {
+        children.push_back(mStreamsInOrder[i]);
     }
     tracks = new WebmMaster(kMkvTracks, children);
 
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/WebmWriter.h
index 4ad770e..dd1fba3 100644
--- a/media/libstagefright/webm/WebmWriter.h
+++ b/media/libstagefright/webm/WebmWriter.h
@@ -40,7 +40,7 @@
     ~WebmWriter() { reset(); }
 
 
-    virtual status_t addSource(const sp<MediaSource> &source);
+    virtual status_t addSource(const sp<IMediaSource> &source);
     virtual status_t start(MetaData *param = NULL);
     virtual status_t stop();
     virtual status_t pause();
@@ -85,7 +85,7 @@
         const char *mName;
         sp<WebmElement> (*mMakeTrack)(const sp<MetaData>&);
 
-        sp<MediaSource> mSource;
+        sp<IMediaSource> mSource;
         sp<WebmElement> mTrackEntry;
         sp<WebmFrameSourceThread> mThread;
         LinkedBlockingQueue<const sp<WebmFrame> > mSink;
@@ -110,6 +110,7 @@
         }
     };
     WebmStream mStreams[kMaxStreams];
+    Vector<sp<WebmElement>> mStreamsInOrder;
 
     sp<WebmFrameSinkThread> mSinkThread;
 
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index fb28624..ae4ac90 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -17,6 +17,7 @@
 LOCAL_C_INCLUDES:= \
         $(TOP)/frameworks/av/media/libstagefright \
         $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/native/include/media/hardware \
         $(TOP)/frameworks/av/media/libstagefright/mpeg2ts \
 
 LOCAL_SHARED_LIBRARIES:= \
@@ -32,6 +33,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_MODULE:= libstagefright_wfd
 
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index ed5a404..3587cb9 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -948,8 +948,9 @@
 
     if (isVideo) {
         format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
-        format->setInt32("store-metadata-in-buffers", true);
-        format->setInt32("store-metadata-in-buffers-output", (mHDCP != NULL)
+        format->setInt32(
+                "android._input-metadata-buffer-type", kMetadataBufferTypeANWBuffer);
+        format->setInt32("android._store-metadata-in-buffers-output", (mHDCP != NULL)
                 && (mHDCP->getCaps() & HDCPModule::HDCP_CAPS_ENCRYPT_NATIVE));
         format->setInt32(
                 "color-format", OMX_COLOR_FormatAndroidOpaque);
@@ -957,10 +958,12 @@
         format->setInt32("level-idc", levelIdc);
         format->setInt32("constraint-set", constraintSet);
     } else {
-        format->setString(
-                "mime",
-                usePCMAudio
-                    ? MEDIA_MIMETYPE_AUDIO_RAW : MEDIA_MIMETYPE_AUDIO_AAC);
+        if (usePCMAudio) {
+            format->setInt32("pcm-encoding", kAudioEncodingPcm16bit);
+            format->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
+        } else {
+            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
+        }
     }
 
     notify = new AMessage(kWhatConverterNotify, this);
diff --git a/media/libstagefright/yuv/Android.mk b/media/libstagefright/yuv/Android.mk
index dc67288..f2fd3be 100644
--- a/media/libstagefright/yuv/Android.mk
+++ b/media/libstagefright/yuv/Android.mk
@@ -14,5 +14,6 @@
 
 LOCAL_CFLAGS += -Werror -Wall
 LOCAL_CLANG := true
+LOCAL_SANITIZE := signed-integer-overflow
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 7e017b9..1738df8 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -5,6 +5,7 @@
 LOCAL_SRC_FILES := register.cpp
 LOCAL_MODULE := libregistermsext
 LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS := -Werror -Wall
 include $(BUILD_STATIC_LIBRARY)
 endif
 
@@ -14,22 +15,16 @@
 	main_mediaserver.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libaudioflinger \
-	libaudiopolicyservice \
-	libcamera_metadata\
+	libcamera_metadata \
+	libcamera_client \
 	libcameraservice \
-	libicuuc \
-	libmedialogservice \
 	libresourcemanagerservice \
 	libcutils \
-	libnbaio \
 	libmedia \
 	libmediaplayerservice \
 	libutils \
-	liblog \
 	libbinder \
-	libsoundtriggerservice \
-	libradioservice
+	libicuuc \
 
 LOCAL_STATIC_LIBRARIES := \
         libicuandroid_utils \
@@ -37,21 +32,14 @@
 
 LOCAL_C_INCLUDES := \
     frameworks/av/media/libmediaplayerservice \
-    frameworks/av/media/libmedia \
-    frameworks/av/services/medialog \
-    frameworks/av/services/audioflinger \
-    frameworks/av/services/audiopolicy \
-    frameworks/av/services/audiopolicy/common/managerdefinitions/include \
-    frameworks/av/services/audiopolicy/common/include \
-    frameworks/av/services/audiopolicy/engine/interface \
     frameworks/av/services/camera/libcameraservice \
     frameworks/av/services/mediaresourcemanager \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/services/soundtrigger \
-    frameworks/av/services/radio \
-    external/sonic
 
 LOCAL_MODULE:= mediaserver
 LOCAL_32_BIT_ONLY := true
 
+LOCAL_INIT_RC := mediaserver.rc
+
+LOCAL_CFLAGS := -Werror -Wall
+
 include $(BUILD_EXECUTABLE)
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index 8cc9508..ecddc48 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -18,132 +18,30 @@
 #define LOG_TAG "mediaserver"
 //#define LOG_NDEBUG 0
 
-#include <fcntl.h>
-#include <sys/prctl.h>
-#include <sys/wait.h>
 #include <binder/IPCThreadState.h>
 #include <binder/ProcessState.h>
 #include <binder/IServiceManager.h>
-#include <cutils/properties.h>
 #include <utils/Log.h>
 #include "RegisterExtensions.h"
 
 // from LOCAL_C_INCLUDES
-#include "AudioFlinger.h"
-#include "CameraService.h"
 #include "IcuUtils.h"
-#include "MediaLogService.h"
 #include "MediaPlayerService.h"
 #include "ResourceManagerService.h"
-#include "service/AudioPolicyService.h"
-#include "MediaUtils.h"
-#include "SoundTriggerHwService.h"
-#include "RadioService.h"
 
 using namespace android;
 
-int main(int argc __unused, char** argv)
+int main(int argc __unused, char **argv __unused)
 {
-    limitProcessMemory(
-        "ro.media.maxmem", /* property that defines limit */
-        SIZE_MAX, /* upper limit in bytes */
-        65 /* upper limit as percentage of physical RAM */);
-
     signal(SIGPIPE, SIG_IGN);
-    char value[PROPERTY_VALUE_MAX];
-    bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
-    pid_t childPid;
-    // FIXME The advantage of making the process containing media.log service the parent process of
-    // the process that contains all the other real services, is that it allows us to collect more
-    // detailed information such as signal numbers, stop and continue, resource usage, etc.
-    // But it is also more complex.  Consider replacing this by independent processes, and using
-    // binder on death notification instead.
-    if (doLog && (childPid = fork()) != 0) {
-        // media.log service
-        //prctl(PR_SET_NAME, (unsigned long) "media.log", 0, 0, 0);
-        // unfortunately ps ignores PR_SET_NAME for the main thread, so use this ugly hack
-        strcpy(argv[0], "media.log");
-        sp<ProcessState> proc(ProcessState::self());
-        MediaLogService::instantiate();
-        ProcessState::self()->startThreadPool();
-        for (;;) {
-            siginfo_t info;
-            int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED);
-            if (ret == EINTR) {
-                continue;
-            }
-            if (ret < 0) {
-                break;
-            }
-            char buffer[32];
-            const char *code;
-            switch (info.si_code) {
-            case CLD_EXITED:
-                code = "CLD_EXITED";
-                break;
-            case CLD_KILLED:
-                code = "CLD_KILLED";
-                break;
-            case CLD_DUMPED:
-                code = "CLD_DUMPED";
-                break;
-            case CLD_STOPPED:
-                code = "CLD_STOPPED";
-                break;
-            case CLD_TRAPPED:
-                code = "CLD_TRAPPED";
-                break;
-            case CLD_CONTINUED:
-                code = "CLD_CONTINUED";
-                break;
-            default:
-                snprintf(buffer, sizeof(buffer), "unknown (%d)", info.si_code);
-                code = buffer;
-                break;
-            }
-            struct rusage usage;
-            getrusage(RUSAGE_CHILDREN, &usage);
-            ALOG(LOG_ERROR, "media.log", "pid %d status %d code %s user %ld.%03lds sys %ld.%03lds",
-                    info.si_pid, info.si_status, code,
-                    usage.ru_utime.tv_sec, usage.ru_utime.tv_usec / 1000,
-                    usage.ru_stime.tv_sec, usage.ru_stime.tv_usec / 1000);
-            sp<IServiceManager> sm = defaultServiceManager();
-            sp<IBinder> binder = sm->getService(String16("media.log"));
-            if (binder != 0) {
-                Vector<String16> args;
-                binder->dump(-1, args);
-            }
-            switch (info.si_code) {
-            case CLD_EXITED:
-            case CLD_KILLED:
-            case CLD_DUMPED: {
-                ALOG(LOG_INFO, "media.log", "exiting");
-                _exit(0);
-                // not reached
-                }
-            default:
-                break;
-            }
-        }
-    } else {
-        // all other services
-        if (doLog) {
-            prctl(PR_SET_PDEATHSIG, SIGKILL);   // if parent media.log dies before me, kill me also
-            setpgid(0, 0);                      // but if I die first, don't kill my parent
-        }
-        InitializeIcuOrDie();
-        sp<ProcessState> proc(ProcessState::self());
-        sp<IServiceManager> sm = defaultServiceManager();
-        ALOGI("ServiceManager: %p", sm.get());
-        AudioFlinger::instantiate();
-        MediaPlayerService::instantiate();
-        ResourceManagerService::instantiate();
-        CameraService::instantiate();
-        AudioPolicyService::instantiate();
-        SoundTriggerHwService::instantiate();
-        RadioService::instantiate();
-        registerExtensions();
-        ProcessState::self()->startThreadPool();
-        IPCThreadState::self()->joinThreadPool();
-    }
+
+    sp<ProcessState> proc(ProcessState::self());
+    sp<IServiceManager> sm(defaultServiceManager());
+    ALOGI("ServiceManager: %p", sm.get());
+    InitializeIcuOrDie();
+    MediaPlayerService::instantiate();
+    ResourceManagerService::instantiate();
+    registerExtensions();
+    ProcessState::self()->startThreadPool();
+    IPCThreadState::self()->joinThreadPool();
 }
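
After this change mediaserver hosts only the media player and resource manager services; audio, camera, sound trigger and radio move out of the process (hence the trimmed library and include lists in the makefile above). Each instantiate() call is the usual BinderService publish idiom, roughly equivalent to this hedged sketch:

    #include <binder/IServiceManager.h>

    // Approximation of what BinderService<SERVICE>::instantiate() does: create the
    // service object and register it with servicemanager under its well-known name.
    template <typename SERVICE>
    static void publishService(const char* name) {
        android::defaultServiceManager()->addService(
                android::String16(name), new SERVICE());
    }
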
diff --git a/media/mediaserver/mediaserver.rc b/media/mediaserver/mediaserver.rc
new file mode 100644
index 0000000..b777d5c
--- /dev/null
+++ b/media/mediaserver/mediaserver.rc
@@ -0,0 +1,6 @@
+service media /system/bin/mediaserver
+    class main
+    user media
+    group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc mediadrm
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks
diff --git a/media/mtp/Android.mk b/media/mtp/Android.mk
index 3af0956..cb7e4aa 100644
--- a/media/mtp/Android.mk
+++ b/media/mtp/Android.mk
@@ -37,7 +37,7 @@
 
 LOCAL_MODULE:= libmtp
 
-LOCAL_CFLAGS := -DMTP_DEVICE -DMTP_HOST
+LOCAL_CFLAGS := -DMTP_DEVICE -DMTP_HOST -Wall -Wextra -Werror
 
 LOCAL_SHARED_LIBRARIES := libutils libcutils liblog libusbhost libbinder
 
diff --git a/media/mtp/MtpDataPacket.cpp b/media/mtp/MtpDataPacket.cpp
index 052b700..0381edf 100644
--- a/media/mtp/MtpDataPacket.cpp
+++ b/media/mtp/MtpDataPacket.cpp
@@ -25,8 +25,6 @@
 #include "MtpDataPacket.h"
 #include "MtpStringBuffer.h"
 
-#define MTP_BUFFER_SIZE 16384
-
 namespace android {
 
 MtpDataPacket::MtpDataPacket()
@@ -458,7 +456,7 @@
         // look at the length field to see if the data spans multiple packets
         uint32_t totalLength = MtpPacket::getUInt32(MTP_CONTAINER_LENGTH_OFFSET);
         allocate(totalLength);
-        while (totalLength > length) {
+        while (totalLength > static_cast<uint32_t>(length)) {
             request->buffer = mBuffer + length;
             request->buffer_length = totalLength - length;
             int ret = transfer(request);
@@ -525,16 +523,9 @@
 int MtpDataPacket::write(struct usb_request *request) {
     MtpPacket::putUInt32(MTP_CONTAINER_LENGTH_OFFSET, mPacketSize);
     MtpPacket::putUInt16(MTP_CONTAINER_TYPE_OFFSET, MTP_CONTAINER_TYPE_DATA);
-
-    // send header separately from data
     request->buffer = mBuffer;
-    request->buffer_length = MTP_CONTAINER_HEADER_SIZE;
+    request->buffer_length = mPacketSize;
     int ret = transfer(request);
-    if (ret == MTP_CONTAINER_HEADER_SIZE) {
-        request->buffer = mBuffer + MTP_CONTAINER_HEADER_SIZE;
-        request->buffer_length = mPacketSize - MTP_CONTAINER_HEADER_SIZE;
-        ret = transfer(request);
-    }
     return (ret < 0 ? ret : 0);
 }
 
@@ -547,17 +538,17 @@
 
 #endif // MTP_HOST
 
-void* MtpDataPacket::getData(int& outLength) const {
+void* MtpDataPacket::getData(int* outLength) const {
     int length = mPacketSize - MTP_CONTAINER_HEADER_SIZE;
     if (length > 0) {
         void* result = malloc(length);
         if (result) {
             memcpy(result, mBuffer + MTP_CONTAINER_HEADER_SIZE, length);
-            outLength = length;
+            *outLength = length;
             return result;
         }
     }
-    outLength = 0;
+    *outLength = 0;
     return NULL;
 }
 
diff --git a/media/mtp/MtpDataPacket.h b/media/mtp/MtpDataPacket.h
index 13d3bd9..6240f28 100644
--- a/media/mtp/MtpDataPacket.h
+++ b/media/mtp/MtpDataPacket.h
@@ -117,7 +117,7 @@
 
     inline bool         hasData() const { return mPacketSize > MTP_CONTAINER_HEADER_SIZE; }
     inline uint32_t     getContainerLength() const { return MtpPacket::getUInt32(MTP_CONTAINER_LENGTH_OFFSET); }
-    void*               getData(int& outLength) const;
+    void*               getData(int* outLength) const;
 };
 
 }; // namespace android
diff --git a/media/mtp/MtpDebug.cpp b/media/mtp/MtpDebug.cpp
index 9f3037d..1c04bcf 100644
--- a/media/mtp/MtpDebug.cpp
+++ b/media/mtp/MtpDebug.cpp
@@ -101,6 +101,7 @@
     { "MTP_FORMAT_TIFF_IT",                         0x380E },
     { "MTP_FORMAT_JP2",                             0x380F },
     { "MTP_FORMAT_JPX",                             0x3810 },
+    { "MTP_FORMAT_DNG",                             0x3811 },
     { "MTP_FORMAT_UNDEFINED_FIRMWARE",              0xB802 },
     { "MTP_FORMAT_WINDOWS_IMAGE_FORMAT",            0xB881 },
     { "MTP_FORMAT_UNDEFINED_AUDIO",                 0xB900 },
diff --git a/media/mtp/MtpDevice.cpp b/media/mtp/MtpDevice.cpp
index 3eafd6f..bd89a51 100644
--- a/media/mtp/MtpDevice.cpp
+++ b/media/mtp/MtpDevice.cpp
@@ -19,6 +19,7 @@
 #include "MtpDebug.h"
 #include "MtpDevice.h"
 #include "MtpDeviceInfo.h"
+#include "MtpEventPacket.h"
 #include "MtpObjectInfo.h"
 #include "MtpProperty.h"
 #include "MtpStorageInfo.h"
@@ -50,6 +51,19 @@
 }
 #endif
 
+namespace {
+
+bool writeToFd(void* data, uint32_t /* unused_offset */, uint32_t length, void* clientData) {
+    const int fd = *static_cast<int*>(clientData);
+    const ssize_t result = write(fd, data, length);
+    if (result < 0) {
+        return false;
+    }
+    return static_cast<uint32_t>(result) == length;
+}
+
+}  // namespace
+
 MtpDevice* MtpDevice::open(const char* deviceName, int fd) {
     struct usb_device *device = usb_device_new(deviceName, fd);
     if (!device) {
@@ -123,6 +137,10 @@
                     printf("no MTP string\n");
                 }
             }
+#else
+            else {
+                continue;
+            }
 #endif
             // if we got here, then we have a likely MTP or PTP device
 
@@ -163,7 +181,13 @@
                 return NULL;
             }
 
-            if (usb_device_claim_interface(device, interface->bInterfaceNumber)) {
+            int ret = usb_device_claim_interface(device, interface->bInterfaceNumber);
+            if (ret && errno == EBUSY) {
+                // disconnect kernel driver and try again
+                usb_device_connect_kernel_driver(device, interface->bInterfaceNumber, false);
+                ret = usb_device_claim_interface(device, interface->bInterfaceNumber);
+            }
+            if (ret) {
                 ALOGE("usb_device_claim_interface failed errno: %d\n", errno);
                 usb_device_close(device);
                 return NULL;
@@ -194,7 +218,9 @@
         mDeviceInfo(NULL),
         mSessionID(0),
         mTransactionID(0),
-        mReceivedResponse(false)
+        mReceivedResponse(false),
+        mProcessingEvent(false),
+        mCurrentEventHandle(0)
 {
     mRequestIn1 = usb_request_new(device, ep_in);
     mRequestIn2 = usb_request_new(device, ep_in);
@@ -414,7 +440,7 @@
     if (sendRequest(MTP_OPERATION_GET_THUMB) && readData()) {
         MtpResponseCode ret = readResponse();
         if (ret == MTP_RESPONSE_OK) {
-            return mData.getData(outLength);
+            return mData.getData(&outLength);
         }
     }
     outLength = 0;
@@ -430,8 +456,9 @@
         parent = MTP_PARENT_ROOT;
 
     mRequest.setParameter(1, info->mStorageID);
-    mRequest.setParameter(2, info->mParent);
+    mRequest.setParameter(2, parent);
 
+    mData.reset();
     mData.putUInt32(info->mStorageID);
     mData.putUInt16(info->mFormat);
     mData.putUInt16(info->mProtectionStatus);
@@ -472,21 +499,25 @@
     return (MtpObjectHandle)-1;
 }
 
-bool MtpDevice::sendObject(MtpObjectInfo* info, int srcFD) {
+bool MtpDevice::sendObject(MtpObjectHandle handle, int size, int srcFD) {
     Mutex::Autolock autoLock(mMutex);
 
-    int remaining = info->mCompressedSize;
+    int remaining = size;
     mRequest.reset();
-    mRequest.setParameter(1, info->mHandle);
+    mRequest.setParameter(1, handle);
+    bool error = false;
     if (sendRequest(MTP_OPERATION_SEND_OBJECT)) {
         // send data header
         writeDataHeader(MTP_OPERATION_SEND_OBJECT, remaining);
 
-        char buffer[65536];
+        // USB writes greater than 16K don't work
+        char buffer[MTP_BUFFER_SIZE];
         while (remaining > 0) {
             int count = read(srcFD, buffer, sizeof(buffer));
             if (count > 0) {
-                int written = mData.write(mRequestOut, buffer, count);
+                if (mData.write(mRequestOut, buffer, count) < 0) {
+                    error = true;
+                }
                 // FIXME check error
                 remaining -= count;
             } else {
@@ -495,7 +526,7 @@
         }
     }
     MtpResponseCode ret = readResponse();
-    return (remaining == 0 && ret == MTP_RESPONSE_OK);
+    return (remaining == 0 && ret == MTP_RESPONSE_OK && !error);
 }
 
 bool MtpDevice::deleteObject(MtpObjectHandle handle) {
@@ -580,7 +611,7 @@
         return NULL;
     if (!readData())
         return NULL;
-    MtpResponseCode ret = readResponse();
+    const MtpResponseCode ret = readResponse();
     if (ret == MTP_RESPONSE_OK) {
         MtpProperty* property = new MtpProperty;
         if (property->read(mData))
@@ -591,97 +622,31 @@
     return NULL;
 }
 
-bool MtpDevice::readObject(MtpObjectHandle handle,
-        bool (* callback)(void* data, int offset, int length, void* clientData),
-        size_t objectSize, void* clientData) {
+bool MtpDevice::getObjectPropValue(MtpObjectHandle handle, MtpProperty* property) {
+    if (property == nullptr)
+        return false;
+
     Mutex::Autolock autoLock(mMutex);
-    bool result = false;
 
     mRequest.reset();
     mRequest.setParameter(1, handle);
-    if (sendRequest(MTP_OPERATION_GET_OBJECT)
-            && mData.readDataHeader(mRequestIn1)) {
-        uint32_t length = mData.getContainerLength();
-        if (length - MTP_CONTAINER_HEADER_SIZE != objectSize) {
-            ALOGE("readObject error objectSize: %d, length: %d",
-                    objectSize, length);
-            goto fail;
-        }
-        length -= MTP_CONTAINER_HEADER_SIZE;
-        uint32_t remaining = length;
-        int offset = 0;
-
-        int initialDataLength = 0;
-        void* initialData = mData.getData(initialDataLength);
-        if (initialData) {
-            if (initialDataLength > 0) {
-                if (!callback(initialData, 0, initialDataLength, clientData))
-                    goto fail;
-                remaining -= initialDataLength;
-                offset += initialDataLength;
-            }
-            free(initialData);
-        }
-
-        // USB reads greater than 16K don't work
-        char buffer1[16384], buffer2[16384];
-        mRequestIn1->buffer = buffer1;
-        mRequestIn2->buffer = buffer2;
-        struct usb_request* req = mRequestIn1;
-        void* writeBuffer = NULL;
-        int writeLength = 0;
-
-        while (remaining > 0 || writeBuffer) {
-            if (remaining > 0) {
-                // queue up a read request
-                req->buffer_length = (remaining > sizeof(buffer1) ? sizeof(buffer1) : remaining);
-                if (mData.readDataAsync(req)) {
-                    ALOGE("readDataAsync failed");
-                    goto fail;
-                }
-            } else {
-                req = NULL;
-            }
-
-            if (writeBuffer) {
-                // write previous buffer
-                if (!callback(writeBuffer, offset, writeLength, clientData)) {
-                    ALOGE("write failed");
-                    // wait for pending read before failing
-                    if (req)
-                        mData.readDataWait(mDevice);
-                    goto fail;
-                }
-                offset += writeLength;
-                writeBuffer = NULL;
-            }
-
-            // wait for read to complete
-            if (req) {
-                int read = mData.readDataWait(mDevice);
-                if (read < 0)
-                    goto fail;
-
-                if (read > 0) {
-                    writeBuffer = req->buffer;
-                    writeLength = read;
-                    remaining -= read;
-                    req = (req == mRequestIn1 ? mRequestIn2 : mRequestIn1);
-                } else {
-                    writeBuffer = NULL;
-                }
-            }
-        }
-
-        MtpResponseCode response = readResponse();
-        if (response == MTP_RESPONSE_OK)
-            result = true;
-    }
-
-fail:
-    return result;
+    mRequest.setParameter(2, property->getPropertyCode());
+    if (!sendRequest(MTP_OPERATION_GET_OBJECT_PROP_VALUE))
+        return false;
+    if (!readData())
+        return false;
+    if (readResponse() != MTP_RESPONSE_OK)
+        return false;
+    property->setCurrentValue(mData);
+    return true;
 }
 
+bool MtpDevice::readObject(MtpObjectHandle handle,
+                           ReadObjectCallback callback,
+                           uint32_t expectedLength,
+                           void* clientData) {
+    return readObjectInternal(handle, callback, &expectedLength, clientData);
+}
 
 // reads the object's data and writes it to the specified file path
 bool MtpDevice::readObject(MtpObjectHandle handle, const char* destPath, int group, int perm) {
@@ -698,89 +663,171 @@
     fchmod(fd, perm);
     umask(mask);
 
+    bool result = readObject(handle, fd);
+    ::close(fd);
+    return result;
+}
+
+bool MtpDevice::readObject(MtpObjectHandle handle, int fd) {
+    ALOGD("readObject: %d", fd);
+    return readObjectInternal(handle, writeToFd, NULL /* expected size */, &fd);
+}
+
+bool MtpDevice::readObjectInternal(MtpObjectHandle handle,
+                                   ReadObjectCallback callback,
+                                   const uint32_t* expectedLength,
+                                   void* clientData) {
     Mutex::Autolock autoLock(mMutex);
-    bool result = false;
 
     mRequest.reset();
     mRequest.setParameter(1, handle);
-    if (sendRequest(MTP_OPERATION_GET_OBJECT)
-            && mData.readDataHeader(mRequestIn1)) {
-        uint32_t length = mData.getContainerLength();
-        if (length < MTP_CONTAINER_HEADER_SIZE)
-            goto fail;
-        length -= MTP_CONTAINER_HEADER_SIZE;
-        uint32_t remaining = length;
+    if (!sendRequest(MTP_OPERATION_GET_OBJECT)) {
+        ALOGE("Failed to send a read request.");
+        return false;
+    }
 
+    return readData(callback, expectedLength, nullptr, clientData);
+}
+
+bool MtpDevice::readData(ReadObjectCallback callback,
+                            const uint32_t* expectedLength,
+                            uint32_t* writtenSize,
+                            void* clientData) {
+    if (!mData.readDataHeader(mRequestIn1)) {
+        ALOGE("Failed to read header.");
+        return false;
+    }
+
+    // If the object size is 0 bytes, the remote device may reply with a response
+    // packet without sending any data packets.
+    if (mData.getContainerType() == MTP_CONTAINER_TYPE_RESPONSE) {
+        mResponse.copyFrom(mData);
+        return mResponse.getResponseCode() == MTP_RESPONSE_OK;
+    }
+
+    const uint32_t fullLength = mData.getContainerLength();
+    if (fullLength < MTP_CONTAINER_HEADER_SIZE) {
+        ALOGE("fullLength is too short: %d", fullLength);
+        return false;
+    }
+    const uint32_t length = fullLength - MTP_CONTAINER_HEADER_SIZE;
+    if (expectedLength && length != *expectedLength) {
+        ALOGE("readObject error length: %d", fullLength);
+        return false;
+    }
+
+    uint32_t offset = 0;
+    bool writingError = false;
+
+    {
         int initialDataLength = 0;
-        void* initialData = mData.getData(initialDataLength);
+        void* const initialData = mData.getData(&initialDataLength);
         if (initialData) {
             if (initialDataLength > 0) {
-                if (write(fd, initialData, initialDataLength) != initialDataLength) {
-                    free(initialData);
-                    goto fail;
+                if (!callback(initialData, offset, initialDataLength, clientData)) {
+                    ALOGE("Failed to write initial data.");
+                    writingError = true;
                 }
-                remaining -= initialDataLength;
+                offset += initialDataLength;
             }
             free(initialData);
         }
+    }
 
-        // USB reads greater than 16K don't work
-        char buffer1[16384], buffer2[16384];
-        mRequestIn1->buffer = buffer1;
-        mRequestIn2->buffer = buffer2;
-        struct usb_request* req = mRequestIn1;
+    // USB reads greater than 16K don't work.
+    char buffer1[MTP_BUFFER_SIZE], buffer2[MTP_BUFFER_SIZE];
+    mRequestIn1->buffer = buffer1;
+    mRequestIn2->buffer = buffer2;
+    struct usb_request* req = NULL;
+
+    while (offset < length) {
+        // Wait for previous read to complete.
         void* writeBuffer = NULL;
         int writeLength = 0;
-
-        while (remaining > 0 || writeBuffer) {
-            if (remaining > 0) {
-                // queue up a read request
-                req->buffer_length = (remaining > sizeof(buffer1) ? sizeof(buffer1) : remaining);
-                if (mData.readDataAsync(req)) {
-                    ALOGE("readDataAsync failed");
-                    goto fail;
-                }
-            } else {
-                req = NULL;
+        if (req) {
+            const int read = mData.readDataWait(mDevice);
+            if (read < 0) {
+                ALOGE("readDataWait failed.");
+                return false;
             }
+            writeBuffer = req->buffer;
+            writeLength = read;
+        }
 
-            if (writeBuffer) {
-                // write previous buffer
-                if (write(fd, writeBuffer, writeLength) != writeLength) {
-                    ALOGE("write failed");
-                    // wait for pending read before failing
-                    if (req)
-                        mData.readDataWait(mDevice);
-                    goto fail;
-                }
-                writeBuffer = NULL;
-            }
-
-            // wait for read to complete
-            if (req) {
-                int read = mData.readDataWait(mDevice);
-                if (read < 0)
-                    goto fail;
-
-                if (read > 0) {
-                    writeBuffer = req->buffer;
-                    writeLength = read;
-                    remaining -= read;
-                    req = (req == mRequestIn1 ? mRequestIn2 : mRequestIn1);
-                } else {
-                    writeBuffer = NULL;
-                }
+        // Request to read next chunk.
+        const uint32_t nextOffset = offset + writeLength;
+        if (nextOffset < length) {
+            // Queue up a read request.
+            const size_t remaining = length - nextOffset;
+            req = (req == mRequestIn1 ? mRequestIn2 : mRequestIn1);
+            req->buffer_length = remaining > MTP_BUFFER_SIZE ?
+                    static_cast<size_t>(MTP_BUFFER_SIZE) : remaining;
+            if (mData.readDataAsync(req) != 0) {
+                ALOGE("readDataAsync failed");
+                return false;
             }
         }
 
-        MtpResponseCode response = readResponse();
-        if (response == MTP_RESPONSE_OK)
-            result = true;
+        // Write previous buffer.
+        if (writeBuffer && !writingError) {
+            if (!callback(writeBuffer, offset, writeLength, clientData)) {
+                ALOGE("write failed");
+                writingError = true;
+            }
+        }
+        offset = nextOffset;
     }
 
-fail:
-    ::close(fd);
-    return result;
+    if (writtenSize) {
+        *writtenSize = length;
+    }
+
+    return readResponse() == MTP_RESPONSE_OK;
+}
+
+bool MtpDevice::readPartialObject(MtpObjectHandle handle,
+                                  uint32_t offset,
+                                  uint32_t size,
+                                  uint32_t *writtenSize,
+                                  ReadObjectCallback callback,
+                                  void* clientData) {
+    Mutex::Autolock autoLock(mMutex);
+
+    mRequest.reset();
+    mRequest.setParameter(1, handle);
+    mRequest.setParameter(2, offset);
+    mRequest.setParameter(3, size);
+    if (!sendRequest(MTP_OPERATION_GET_PARTIAL_OBJECT)) {
+        ALOGE("Failed to send a read request.");
+        return false;
+    }
+    // The expected size is passed as null because it would require the exact number of
+    // bytes to read, yet MTP_OPERATION_GET_PARTIAL_OBJECT allows devices to return fewer
+    // bytes than requested. The destination buffer length should be checked in |callback|.
+    return readData(callback, nullptr /* expected size */, writtenSize, clientData);
+}
+
+bool MtpDevice::readPartialObject64(MtpObjectHandle handle,
+                                    uint64_t offset,
+                                    uint32_t size,
+                                    uint32_t *writtenSize,
+                                    ReadObjectCallback callback,
+                                    void* clientData) {
+    Mutex::Autolock autoLock(mMutex);
+
+    mRequest.reset();
+    mRequest.setParameter(1, handle);
+    mRequest.setParameter(2, 0xffffffff & offset);
+    mRequest.setParameter(3, 0xffffffff & (offset >> 32));
+    mRequest.setParameter(4, size);
+    if (!sendRequest(MTP_OPERATION_GET_PARTIAL_OBJECT_64)) {
+        ALOGE("Failed to send a read request.");
+        return false;
+    }
+    // The expected size is passed as null because it would require the exact number of
+    // bytes to read, yet MTP_OPERATION_GET_PARTIAL_OBJECT_64 allows devices to return fewer
+    // bytes than requested. The destination buffer length should be checked in |callback|.
+    return readData(callback, nullptr /* expected size */, writtenSize, clientData);
 }
 
 bool MtpDevice::sendRequest(MtpOperationCode operation) {
@@ -800,7 +847,7 @@
     mData.setTransactionID(mRequest.getTransactionID());
     int ret = mData.write(mRequestOut);
     mData.dump();
-    return (ret > 0);
+    return (ret >= 0);
 }
 
 bool MtpDevice::readData() {
@@ -851,4 +898,44 @@
     }
 }
 
+int MtpDevice::submitEventRequest() {
+    if (mEventMutex.tryLock()) {
+        // An event is being reaped on another thread.
+        return -1;
+    }
+    if (mProcessingEvent) {
+        // An event request was submitted, but no reapEventRequest called so far.
+        return -1;
+    }
+    Mutex::Autolock autoLock(mEventMutexForInterrupt);
+    mEventPacket.sendRequest(mRequestIntr);
+    const int currentHandle = ++mCurrentEventHandle;
+    mProcessingEvent = true;
+    mEventMutex.unlock();
+    return currentHandle;
+}
+
+int MtpDevice::reapEventRequest(int handle, uint32_t (*parameters)[3]) {
+    Mutex::Autolock autoLock(mEventMutex);
+    if (!mProcessingEvent || mCurrentEventHandle != handle || !parameters) {
+        return -1;
+    }
+    mProcessingEvent = false;
+    const int readSize = mEventPacket.readResponse(mRequestIntr->dev);
+    const int result = mEventPacket.getEventCode();
+    // MTP event has three parameters.
+    (*parameters)[0] = mEventPacket.getParameter(1);
+    (*parameters)[1] = mEventPacket.getParameter(2);
+    (*parameters)[2] = mEventPacket.getParameter(3);
+    return readSize != 0 ? result : 0;
+}
+
+void MtpDevice::discardEventRequest(int handle) {
+    Mutex::Autolock autoLock(mEventMutexForInterrupt);
+    if (mCurrentEventHandle != handle) {
+        return;
+    }
+    usb_request_cancel(mRequestIntr);
+}
+
 }  // namespace android
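
The readData() rewrite keeps the old double-buffering but restructures it around a running byte offset: while one 16K usb_request is in flight, the previously completed chunk is handed to the callback, so USB reads overlap the writes. A condensed sketch of the ping-pong loop (error paths elided; waitFor() and queueRead() are hypothetical stand-ins for readDataWait() and readDataAsync()):

    char bufA[MTP_BUFFER_SIZE], bufB[MTP_BUFFER_SIZE];
    reqA->buffer = bufA;
    reqB->buffer = bufB;
    struct usb_request* inflight = NULL;
    uint32_t offset = 0;
    while (offset < length) {
        void* chunk = NULL;
        int n = 0;
        if (inflight) {                 // reap the read queued on the last iteration
            n = waitFor(inflight);
            chunk = inflight->buffer;
        }
        const uint32_t next = offset + n;
        if (next < length) {            // queue the next read before writing this chunk
            inflight = (inflight == reqA) ? reqB : reqA;
            queueRead(inflight, length - next > MTP_BUFFER_SIZE ? MTP_BUFFER_SIZE
                                                                : length - next);
        }
        if (chunk) callback(chunk, offset, n, clientData);  // overlaps the queued read
        offset = next;
    }
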
diff --git a/media/mtp/MtpDevice.h b/media/mtp/MtpDevice.h
index 9b0acbf..4be44cf 100644
--- a/media/mtp/MtpDevice.h
+++ b/media/mtp/MtpDevice.h
@@ -17,8 +17,9 @@
 #ifndef _MTP_DEVICE_H
 #define _MTP_DEVICE_H
 
-#include "MtpRequestPacket.h"
+#include "MtpEventPacket.h"
 #include "MtpDataPacket.h"
+#include "MtpRequestPacket.h"
 #include "MtpResponsePacket.h"
 #include "MtpTypes.h"
 
@@ -31,6 +32,7 @@
 namespace android {
 
 class MtpDeviceInfo;
+class MtpEventPacket;
 class MtpObjectInfo;
 class MtpStorageInfo;
 
@@ -53,17 +55,27 @@
     MtpRequestPacket        mRequest;
     MtpDataPacket           mData;
     MtpResponsePacket       mResponse;
+    MtpEventPacket          mEventPacket;
+
     // set to true if we received a response packet instead of a data packet
     bool                    mReceivedResponse;
+    bool                    mProcessingEvent;
+    int                     mCurrentEventHandle;
 
     // to ensure only one MTP transaction at a time
     Mutex                   mMutex;
+    Mutex                   mEventMutex;
+    Mutex                   mEventMutexForInterrupt;
 
 public:
-                            MtpDevice(struct usb_device* device, int interface,
-                                    const struct usb_endpoint_descriptor *ep_in,
-                                    const struct usb_endpoint_descriptor *ep_out,
-                                    const struct usb_endpoint_descriptor *ep_intr);
+    typedef bool (*ReadObjectCallback)
+            (void* data, uint32_t offset, uint32_t length, void* clientData);
+
+    MtpDevice(struct usb_device* device,
+              int interface,
+              const struct usb_endpoint_descriptor *ep_in,
+              const struct usb_endpoint_descriptor *ep_out,
+              const struct usb_endpoint_descriptor *ep_intr);
 
     static MtpDevice*       open(const char* deviceName, int fd);
 
@@ -85,7 +97,7 @@
     MtpObjectInfo*          getObjectInfo(MtpObjectHandle handle);
     void*                   getThumbnail(MtpObjectHandle handle, int& outLength);
     MtpObjectHandle         sendObjectInfo(MtpObjectInfo* info);
-    bool                    sendObject(MtpObjectInfo* info, int srcFD);
+    bool                    sendObject(MtpObjectHandle handle, int size, int srcFD);
     bool                    deleteObject(MtpObjectHandle handle);
     MtpObjectHandle         getParent(MtpObjectHandle handle);
     MtpObjectHandle         getStorageID(MtpObjectHandle handle);
@@ -95,20 +107,56 @@
     MtpProperty*            getDevicePropDesc(MtpDeviceProperty code);
     MtpProperty*            getObjectPropDesc(MtpObjectProperty code, MtpObjectFormat format);
 
-    bool                    readObject(MtpObjectHandle handle,
-                                    bool (* callback)(void* data, int offset,
-                                            int length, void* clientData),
-                                    size_t objectSize, void* clientData);
+    // Reads value of |property| for |handle|. Returns true on success.
+    bool                    getObjectPropValue(MtpObjectHandle handle, MtpProperty* property);
+
+    bool                    readObject(MtpObjectHandle handle, ReadObjectCallback callback,
+                                    uint32_t objectSize, void* clientData);
     bool                    readObject(MtpObjectHandle handle, const char* destPath, int group,
                                     int perm);
+    bool                    readObject(MtpObjectHandle handle, int fd);
+    bool                    readPartialObject(MtpObjectHandle handle,
+                                              uint32_t offset,
+                                              uint32_t size,
+                                              uint32_t *writtenSize,
+                                              ReadObjectCallback callback,
+                                              void* clientData);
+    bool                    readPartialObject64(MtpObjectHandle handle,
+                                                uint64_t offset,
+                                                uint32_t size,
+                                                uint32_t *writtenSize,
+                                                ReadObjectCallback callback,
+                                                void* clientData);
+    // Starts a request to read an MTP event from the device. Returns a request handle
+    // that can be used for a blocking read or for cancellation. Returns -1 if another
+    // thread is already processing an event.
+    int                     submitEventRequest();
+    // Waits for an MTP event from the device and returns the MTP event code, blocking
+    // the current thread until an event is received. |handle| should be a request handle
+    // returned by |submitEventRequest|. The function writes the event parameters to
+    // |parameters|. Returns 0 for cancellations and -1 for errors.
+    int                     reapEventRequest(int handle, uint32_t (*parameters)[3]);
+    // Cancels an event request. |handle| should be a request handle returned by
+    // |submitEventRequest|. If a thread is blocked in |reapEventRequest| with the same
+    // |handle|, that thread will resume.
+    void                    discardEventRequest(int handle);
 
 private:
+    // If |objectSize| is not NULL, the object size is checked before reading data bytes.
+    bool                    readObjectInternal(MtpObjectHandle handle,
+                                               ReadObjectCallback callback,
+                                               const uint32_t* objectSize,
+                                               void* clientData);
+    // If |objectSize| is not NULL, the object size is checked before reading data bytes.
+    bool                    readData(ReadObjectCallback callback,
+                                     const uint32_t* objectSize,
+                                     uint32_t* writtenData,
+                                     void* clientData);
     bool                    sendRequest(MtpOperationCode operation);
     bool                    sendData();
     bool                    readData();
     bool                    writeDataHeader(MtpOperationCode operation, int dataLength);
     MtpResponseCode         readResponse();
-
 };
 
 }; // namespace android
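
The event methods are documented in the header above; here is a hedged usage sketch showing the intended threading model (one thread blocks in the reap while another may cancel; everything except the three MtpDevice methods is illustrative):

    // Reader thread: submit a request, then block until an event or a cancellation.
    const int handle = device->submitEventRequest();
    if (handle < 0) {
        // another thread already has an event request outstanding
    } else {
        uint32_t params[3];
        const int code = device->reapEventRequest(handle, &params);
        if (code > 0) {
            ALOGD("MTP event 0x%04x (%u, %u, %u)",
                  code, params[0], params[1], params[2]);
        } else if (code == 0) {
            // cancelled: another thread called device->discardEventRequest(handle)
        }
    }
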
diff --git a/media/mtp/MtpEventPacket.cpp b/media/mtp/MtpEventPacket.cpp
index d2fca42..8e13ea9 100644
--- a/media/mtp/MtpEventPacket.cpp
+++ b/media/mtp/MtpEventPacket.cpp
@@ -54,17 +54,26 @@
 #endif
 
 #ifdef MTP_HOST
-int MtpEventPacket::read(struct usb_request *request) {
+int MtpEventPacket::sendRequest(struct usb_request *request) {
     request->buffer = mBuffer;
     request->buffer_length = mBufferSize;
-    int ret = transfer(request);
-     if (ret >= 0)
-        mPacketSize = ret;
-    else
-        mPacketSize = 0;
-    return ret;
+    mPacketSize = 0;
+    if (usb_request_queue(request)) {
+        ALOGE("usb_endpoint_queue failed, errno: %d", errno);
+        return -1;
+    }
+    return 0;
+}
+
+int MtpEventPacket::readResponse(struct usb_device *device) {
+    struct usb_request* const req = usb_request_wait(device);
+    if (req) {
+        mPacketSize = req->actual_length;
+        return req->actual_length;
+    } else {
+        return -1;
+    }
 }
 #endif
 
 }  // namespace android
-
diff --git a/media/mtp/MtpEventPacket.h b/media/mtp/MtpEventPacket.h
index 660baad..a8779fd 100644
--- a/media/mtp/MtpEventPacket.h
+++ b/media/mtp/MtpEventPacket.h
@@ -35,7 +35,8 @@
 
 #ifdef MTP_HOST
     // read our buffer with the given request
-    int                 read(struct usb_request *request);
+    int                 sendRequest(struct usb_request *request);
+    int                 readResponse(struct usb_device *device);
 #endif
 
     inline MtpEventCode     getEventCode() const { return getContainerCode(); }
diff --git a/media/mtp/MtpPacket.cpp b/media/mtp/MtpPacket.cpp
index bab1335..35ecb4f 100644
--- a/media/mtp/MtpPacket.cpp
+++ b/media/mtp/MtpPacket.cpp
@@ -69,7 +69,7 @@
     char buffer[500];
     char* bufptr = buffer;
 
-    for (int i = 0; i < mPacketSize; i++) {
+    for (size_t i = 0; i < mPacketSize; i++) {
         sprintf(bufptr, "%02X ", mBuffer[i]);
         bufptr += strlen(bufptr);
         if (i % DUMP_BYTES_PER_ROW == (DUMP_BYTES_PER_ROW - 1)) {
diff --git a/media/mtp/MtpPacket.h b/media/mtp/MtpPacket.h
index 037722a..0e96309 100644
--- a/media/mtp/MtpPacket.h
+++ b/media/mtp/MtpPacket.h
@@ -19,6 +19,7 @@
 
 #include "MtpTypes.h"
 
+struct usb_device;
 struct usb_request;
 
 namespace android {
diff --git a/media/mtp/MtpProperty.cpp b/media/mtp/MtpProperty.cpp
index d58e2a4..039e4f5 100644
--- a/media/mtp/MtpProperty.cpp
+++ b/media/mtp/MtpProperty.cpp
@@ -236,6 +236,12 @@
         mCurrentValue.str = NULL;
 }
 
+void MtpProperty::setCurrentValue(MtpDataPacket& packet) {
+    free(mCurrentValue.str);
+    mCurrentValue.str = NULL;
+    readValue(packet, mCurrentValue);
+}
+
 void MtpProperty::setFormRange(int min, int max, int step) {
     mFormFlag = kFormRange;
     switch (mType) {
@@ -544,7 +550,7 @@
     MtpPropertyValue* result = new MtpPropertyValue[length];
     for (uint32_t i = 0; i < length; i++)
         if (!readValue(packet, result[i])) {
-            delete result;
+            delete [] result;
             return NULL;
         }
     return result;
diff --git a/media/mtp/MtpProperty.h b/media/mtp/MtpProperty.h
index 2e2ead1..03c08e1 100644
--- a/media/mtp/MtpProperty.h
+++ b/media/mtp/MtpProperty.h
@@ -81,13 +81,16 @@
                                      int defaultValue = 0);
     virtual             ~MtpProperty();
 
-    inline MtpPropertyCode getPropertyCode() const { return mCode; }
+    MtpPropertyCode getPropertyCode() const { return mCode; }
+    MtpDataType getDataType() const { return mType; }
 
     bool                read(MtpDataPacket& packet);
     void                write(MtpDataPacket& packet);
 
     void                setDefaultValue(const uint16_t* string);
     void                setCurrentValue(const uint16_t* string);
+    void                setCurrentValue(MtpDataPacket& packet);
+    const MtpPropertyValue& getCurrentValue() { return mCurrentValue; }
 
     void                setFormRange(int min, int max, int step);
     void                setFormEnum(const int* values, int count);
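
Together with MtpDevice::getObjectPropValue() above, the new setCurrentValue(packet) and getCurrentValue() accessors round out single-property reads from the host side. A hedged usage sketch (the property/type constants and the MtpPropertyValue union layout are assumptions based on the surrounding headers):

    MtpProperty sizeProp(MTP_PROPERTY_OBJECT_SIZE, MTP_TYPE_UINT64);
    if (device->getObjectPropValue(handle, &sizeProp)) {
        const MtpPropertyValue& value = sizeProp.getCurrentValue();
        uint64_t objectSize = value.u.u64;  // assumed union member for MTP_TYPE_UINT64
    }
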
diff --git a/media/mtp/MtpRequestPacket.cpp b/media/mtp/MtpRequestPacket.cpp
index 40b11b0..471967f 100644
--- a/media/mtp/MtpRequestPacket.cpp
+++ b/media/mtp/MtpRequestPacket.cpp
@@ -44,11 +44,12 @@
     }
 
     // request packet should have 12 byte header followed by 0 to 5 32-bit arguments
-    if (ret >= MTP_CONTAINER_HEADER_SIZE
-            && ret <= MTP_CONTAINER_HEADER_SIZE + 5 * sizeof(uint32_t)
-            && ((ret - MTP_CONTAINER_HEADER_SIZE) & 3) == 0) {
-        mPacketSize = ret;
-        mParameterCount = (ret - MTP_CONTAINER_HEADER_SIZE) / sizeof(uint32_t);
+    const size_t read_size = static_cast<size_t>(ret);
+    if (read_size >= MTP_CONTAINER_HEADER_SIZE
+            && read_size <= MTP_CONTAINER_HEADER_SIZE + 5 * sizeof(uint32_t)
+            && ((read_size - MTP_CONTAINER_HEADER_SIZE) & 3) == 0) {
+        mPacketSize = read_size;
+        mParameterCount = (read_size - MTP_CONTAINER_HEADER_SIZE) / sizeof(uint32_t);
     } else {
         ALOGE("Malformed MTP request packet");
         ret = -1;
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 07199e3..90f1a774 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -214,10 +214,11 @@
             mResponse.setTransactionID(transaction);
             ALOGV("sending response %04X", mResponse.getResponseCode());
             ret = mResponse.write(fd);
+            const int savedErrno = errno;
             mResponse.dump();
             if (ret < 0) {
                 ALOGE("request write returned %d, errno: %d", ret, errno);
-                if (errno == ECANCELED) {
+                if (savedErrno == ECANCELED) {
                     // return to top of loop and wait for next command
                     continue;
                 }
@@ -787,15 +788,19 @@
 
     // then transfer the file
     int ret = ioctl(mFD, MTP_SEND_FILE_WITH_HEADER, (unsigned long)&mfr);
+    if (ret < 0) {
+        if (errno == ECANCELED) {
+            result = MTP_RESPONSE_TRANSACTION_CANCELLED;
+        } else {
+            result = MTP_RESPONSE_GENERAL_ERROR;
+        }
+    } else {
+        result = MTP_RESPONSE_OK;
+    }
+
     ALOGV("MTP_SEND_FILE_WITH_HEADER returned %d\n", ret);
     close(mfr.fd);
-    if (ret < 0) {
-        if (errno == ECANCELED)
-            return MTP_RESPONSE_TRANSACTION_CANCELLED;
-        else
-            return MTP_RESPONSE_GENERAL_ERROR;
-    }
-    return MTP_RESPONSE_OK;
+    return result;
 }
 
 MtpResponseCode MtpServer::doGetThumb() {
@@ -864,14 +869,15 @@
     // transfer the file
     int ret = ioctl(mFD, MTP_SEND_FILE_WITH_HEADER, (unsigned long)&mfr);
     ALOGV("MTP_SEND_FILE_WITH_HEADER returned %d\n", ret);
-    close(mfr.fd);
+    result = MTP_RESPONSE_OK;
     if (ret < 0) {
         if (errno == ECANCELED)
-            return MTP_RESPONSE_TRANSACTION_CANCELLED;
+            result = MTP_RESPONSE_TRANSACTION_CANCELLED;
         else
-            return MTP_RESPONSE_GENERAL_ERROR;
+            result = MTP_RESPONSE_GENERAL_ERROR;
     }
-    return MTP_RESPONSE_OK;
+    close(mfr.fd);
+    return result;
 }
 
 MtpResponseCode MtpServer::doSendObjectInfo() {
@@ -917,9 +923,7 @@
     if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER;  // image bit depth
     if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER;  // parent
     if (!mData.getUInt16(temp16)) return MTP_RESPONSE_INVALID_PARAMETER;
-    uint16_t associationType = temp16;
     if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER;
-    uint32_t associationDesc = temp32;        // association desc
     if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER;  // sequence number
     MtpStringBuffer name, created, modified;
     if (!mData.getString(name)) return MTP_RESPONSE_INVALID_PARAMETER;    // file name
@@ -985,6 +989,7 @@
     MtpResponseCode result = MTP_RESPONSE_OK;
     mode_t mask;
     int ret, initialData;
+    bool isCanceled = false;
 
     if (mSendObjectHandle == kInvalidObjectHandle) {
         ALOGE("Expected SendObjectInfo before SendObject");
@@ -1032,6 +1037,10 @@
             ALOGV("receiving %s\n", (const char *)mSendObjectFilePath);
             // transfer the file
             ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
+            if ((ret < 0) && (errno == ECANCELED)) {
+                isCanceled = true;
+            }
+
             ALOGV("MTP_RECEIVE_FILE returned %d\n", ret);
         }
     }
@@ -1039,7 +1048,7 @@
 
     if (ret < 0) {
         unlink(mSendObjectFilePath);
-        if (errno == ECANCELED)
+        if (isCanceled)
             result = MTP_RESPONSE_TRANSACTION_CANCELLED;
         else
             result = MTP_RESPONSE_GENERAL_ERROR;
@@ -1091,7 +1100,6 @@
         }
         strcpy(fileSpot, name);
 
-        int type = entry->d_type;
         if (entry->d_type == DT_DIR) {
             deleteRecursive(pathbuf);
             rmdir(pathbuf);
@@ -1208,6 +1216,7 @@
         length -= initialData;
     }
 
+    bool isCanceled = false;
     if (ret < 0) {
         ALOGE("failed to write initial data");
     } else {
@@ -1219,12 +1228,15 @@
 
             // transfer the file
             ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
+            if ((ret < 0) && (errno == ECANCELED)) {
+                isCanceled = true;
+            }
             ALOGV("MTP_RECEIVE_FILE returned %d", ret);
         }
     }
     if (ret < 0) {
         mResponse.setParameter(1, 0);
-        if (errno == ECANCELED)
+        if (isCanceled)
             return MTP_RESPONSE_TRANSACTION_CANCELLED;
         else
             return MTP_RESPONSE_GENERAL_ERROR;
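
The MtpServer changes above all follow one rule: capture errno (or a flag derived from it) immediately after the failing call, because the logging and close() calls that used to sit between the ioctl and the errno check can themselves overwrite errno. The distilled pattern:

    int ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
    const bool isCanceled = (ret < 0) && (errno == ECANCELED);  // read errno right away
    ALOGV("MTP_RECEIVE_FILE returned %d", ret);  // logging may clobber errno
    close(mfr.fd);                               // so may close()
    if (ret < 0) {
        return isCanceled ? MTP_RESPONSE_TRANSACTION_CANCELLED
                          : MTP_RESPONSE_GENERAL_ERROR;
    }
    return MTP_RESPONSE_OK;
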
diff --git a/media/mtp/MtpUtils.cpp b/media/mtp/MtpUtils.cpp
index 0667bdd..ebf3601 100644
--- a/media/mtp/MtpUtils.cpp
+++ b/media/mtp/MtpUtils.cpp
@@ -19,8 +19,6 @@
 #include <stdio.h>
 #include <time.h>
 
-#include <../private/bionic_time.h> /* TODO: switch this code to icu4c! */
-
 #include "MtpUtils.h"
 
 namespace android {
@@ -32,38 +30,40 @@
 DD replaced by the day (01-31), T is a constant character 'T' delimiting time from date,
 hh is replaced by the hour (00-23), mm is replaced by the minute (00-59), and ss by the
 second (00-59). The ".s" is optional, and represents tenths of a second.
+This is followed by a UTC offset given as "[+-]zzzz" or the literal "Z", meaning UTC.
 */
 
 bool parseDateTime(const char* dateTime, time_t& outSeconds) {
     int year, month, day, hour, minute, second;
-    struct tm tm;
-
     if (sscanf(dateTime, "%04d%02d%02dT%02d%02d%02d",
-            &year, &month, &day, &hour, &minute, &second) != 6)
+               &year, &month, &day, &hour, &minute, &second) != 6)
         return false;
-    const char* tail = dateTime + 15;
+
     // skip optional tenth of second
-    if (tail[0] == '.' && tail[1])
-        tail += 2;
-    //FIXME - support +/-hhmm
+    const char* tail = dateTime + 15;
+    if (tail[0] == '.' && tail[1]) tail += 2;
+
+    // FIXME: "Z" means UTC, but non-"Z" doesn't mean local time.
+    // It might be that you're in Asia/Seoul on vacation and your Android
+    // device has noticed this via the network, but your camera was set to
+    // America/Los_Angeles once when you bought it and doesn't know where
+    // it is right now, so the camera says "20160106T081700-0800" but we
+    // just ignore the "-0800" and assume local time which is actually "+0900".
+    // I think to support this (without switching to Java or using icu4c)
+    // you'd want to always use timegm(3) and then manually add/subtract
+    // the UTC offset parsed from the string (taking care of wrapping).
+    // mktime(3) ignores the tm_gmtoff field, so you can't let it do the work.
     bool useUTC = (tail[0] == 'Z');
 
-    // hack to compute timezone
-    time_t dummy;
-    localtime_r(&dummy, &tm);
-
+    struct tm tm = {};
     tm.tm_sec = second;
     tm.tm_min = minute;
     tm.tm_hour = hour;
     tm.tm_mday = day;
     tm.tm_mon = month - 1;  // mktime uses months in 0 - 11 range
     tm.tm_year = year - 1900;
-    tm.tm_wday = 0;
     tm.tm_isdst = -1;
-    if (useUTC)
-        outSeconds = mktime(&tm);
-    else
-        outSeconds = mktime_tz(&tm, tm.tm_zone);
+    outSeconds = useUTC ? timegm(&tm) : mktime(&tm);
 
     return true;
 }
@@ -73,7 +73,7 @@
 
     localtime_r(&seconds, &tm);
     snprintf(buffer, bufferLength, "%04d%02d%02dT%02d%02d%02d",
-        tm.tm_year + 1900, 
+        tm.tm_year + 1900,
         tm.tm_mon + 1, // localtime_r uses months in 0 - 11 range
         tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
 }
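
The long FIXME above sketches the complete fix: always convert the broken-down fields with timegm(3), which interprets them as UTC, and then apply the parsed "[+-]hhmm" offset by hand, since mktime(3) ignores tm_gmtoff. A hedged sketch, assuming the offset has already been parsed into minutes (0 for a trailing "Z"):

    // e.g. "20160106T081700-0800" -> fields 2016-01-06 08:17:00, offsetMinutes = -480
    struct tm tm = {};               // filled from sscanf as in parseDateTime()
    time_t asUtc = timegm(&tm);      // treat the parsed fields as UTC
    time_t actual = asUtc - (time_t)offsetMinutes * 60;  // shift back to real UTC
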
diff --git a/media/mtp/mtp.h b/media/mtp/mtp.h
index d270df5..adfb102 100644
--- a/media/mtp/mtp.h
+++ b/media/mtp/mtp.h
@@ -37,6 +37,9 @@
 #define MTP_CONTAINER_PARAMETER_OFFSET          12
 #define MTP_CONTAINER_HEADER_SIZE               12
 
+// Maximum buffer size for an MTP packet.
+#define MTP_BUFFER_SIZE 16384
+
 // MTP Data Types
 #define MTP_TYPE_UNDEFINED      0x0000          // Undefined
 #define MTP_TYPE_INT8           0x0001          // Signed 8-bit integer
@@ -90,6 +93,7 @@
 #define MTP_FORMAT_TIFF_IT                              0x380E   // Tag Image File Format for Information Technology (graphic arts)
 #define MTP_FORMAT_JP2                                  0x380F   // JPEG2000 Baseline File Format
 #define MTP_FORMAT_JPX                                  0x3810   // JPEG2000 Extended File Format
+#define MTP_FORMAT_DNG                                  0x3811   // Digital Negative
 #define MTP_FORMAT_UNDEFINED_FIRMWARE                   0xB802
 #define MTP_FORMAT_WINDOWS_IMAGE_FORMAT                 0xB881
 #define MTP_FORMAT_UNDEFINED_AUDIO                      0xB900
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index 8f795cd..7f6b66b 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -27,25 +27,34 @@
                   NdkMediaFormat.cpp                    \
                   NdkMediaMuxer.cpp                     \
                   NdkMediaDrm.cpp                       \
+                  NdkImage.cpp                          \
+                  NdkImageReader.cpp                    \
 
 LOCAL_MODULE:= libmediandk
 
 LOCAL_C_INCLUDES := \
     bionic/libc/private \
     frameworks/base/core/jni \
-    frameworks/av/include/ndk
+    frameworks/av/include/ndk \
+    system/media/camera/include
 
 LOCAL_CFLAGS += -fvisibility=hidden -D EXPORT='__attribute__ ((visibility ("default")))'
 
+LOCAL_CFLAGS += -Werror
+
 LOCAL_SHARED_LIBRARIES := \
     libbinder \
     libmedia \
+    libmediadrm \
     libstagefright \
     libstagefright_foundation \
     liblog \
     libutils \
+    libcutils \
     libandroid_runtime \
     libbinder \
+    libgui \
+    libui \
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
new file mode 100644
index 0000000..40900ad
--- /dev/null
+++ b/media/ndk/NdkImage.cpp
@@ -0,0 +1,631 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkImage"
+
+#include "NdkImagePriv.h"
+#include "NdkImageReaderPriv.h"
+
+#include <utils/Log.h>
+#include "hardware/camera3.h"
+
+using namespace android;
+
+#define ALIGN(x, mask) ( ((x) + (mask) - 1) & ~((mask) - 1) )
+
+AImage::AImage(AImageReader* reader, int32_t format,
+        CpuConsumer::LockedBuffer* buffer, int64_t timestamp,
+        int32_t width, int32_t height, int32_t numPlanes) :
+        mReader(reader), mFormat(format),
+        mBuffer(buffer), mTimestamp(timestamp),
+        mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
+}
+
+// Can only be called by free() with mLock held
+AImage::~AImage() {
+    if (!mIsClosed) {
+        LOG_ALWAYS_FATAL(
+                "Error: AImage %p is deleted before returning buffer to AImageReader!", this);
+    }
+}
+
+bool
+AImage::isClosed() const {
+    Mutex::Autolock _l(mLock);
+    return mIsClosed;
+}
+
+void
+AImage::close() {
+    Mutex::Autolock _l(mLock);
+    if (mIsClosed) {
+        return;
+    }
+    sp<AImageReader> reader = mReader.promote();
+    if (reader == nullptr) {
+        LOG_ALWAYS_FATAL("Error: AImage not closed before AImageReader close!");
+        return;
+    }
+    reader->releaseImageLocked(this);
+    // Should have been set to nullptr in releaseImageLocked
+    // Set to nullptr here for extra safety only
+    mBuffer = nullptr;
+    mIsClosed = true;
+}
+
+void
+AImage::free() {
+    if (!isClosed()) {
+        ALOGE("Cannot free AImage before close!");
+        return;
+    }
+    Mutex::Autolock _l(mLock);
+    delete this;
+}
+
+void
+AImage::lockReader() const {
+    sp<AImageReader> reader = mReader.promote();
+    if (reader == nullptr) {
+        // Reader has been closed
+        return;
+    }
+    reader->mLock.lock();
+}
+
+void
+AImage::unlockReader() const {
+    sp<AImageReader> reader = mReader.promote();
+    if (reader == nullptr) {
+        // Reader has been closed
+        return;
+    }
+    reader->mLock.unlock();
+}
+
+media_status_t
+AImage::getWidth(int32_t* width) const {
+    if (width == nullptr) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *width = -1;
+    if (isClosed()) {
+        ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
+        return AMEDIA_ERROR_INVALID_OBJECT;
+    }
+    *width = mWidth;
+    return AMEDIA_OK;
+}
+
+media_status_t
+AImage::getHeight(int32_t* height) const {
+    if (height == nullptr) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *height = -1;
+    if (isClosed()) {
+        ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
+        return AMEDIA_ERROR_INVALID_OBJECT;
+    }
+    *height = mHeight;
+    return AMEDIA_OK;
+}
+
+media_status_t
+AImage::getFormat(int32_t* format) const {
+    if (format == nullptr) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *format = -1;
+    if (isClosed()) {
+        ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
+        return AMEDIA_ERROR_INVALID_OBJECT;
+    }
+    *format = mFormat;
+    return AMEDIA_OK;
+}
+
+media_status_t
+AImage::getNumPlanes(int32_t* numPlanes) const {
+    if (numPlanes == nullptr) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *numPlanes = -1;
+    if (isClosed()) {
+        ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
+        return AMEDIA_ERROR_INVALID_OBJECT;
+    }
+    *numPlanes = mNumPlanes;
+    return AMEDIA_OK;
+}
+
+media_status_t
+AImage::getTimestamp(int64_t* timestamp) const {
+    if (timestamp == nullptr) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *timestamp = -1;
+    if (isClosed()) {
+        ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
+        return AMEDIA_ERROR_INVALID_OBJECT;
+    }
+    *timestamp = mTimestamp;
+    return AMEDIA_OK;
+}
+
+media_status_t
+AImage::getPlanePixelStride(int planeIdx, /*out*/int32_t* pixelStride) const {
+    if (planeIdx < 0 || planeIdx >= mNumPlanes) {
+        ALOGE("Error: planeIdx %d out of bound [0,%d]",
+                planeIdx, mNumPlanes - 1);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    if (pixelStride == nullptr) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    if (isClosed()) {
+        ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
+        return AMEDIA_ERROR_INVALID_OBJECT;
+    }
+    int32_t fmt = mBuffer->flexFormat;
+    switch (fmt) {
+        case HAL_PIXEL_FORMAT_YCbCr_420_888:
+            *pixelStride = (planeIdx == 0) ? 1 : mBuffer->chromaStep;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+            *pixelStride = (planeIdx == 0) ? 1 : 2;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_Y8:
+            *pixelStride = 1;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_YV12:
+            *pixelStride = 1;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_Y16:
+        case HAL_PIXEL_FORMAT_RAW16:
+        case HAL_PIXEL_FORMAT_RGB_565:
+            // Single plane 16bpp data.
+            *pixelStride = 2;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_RGBA_8888:
+        case HAL_PIXEL_FORMAT_RGBX_8888:
+            *pixelStride = 4;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_RGB_888:
+            // Single plane, 24bpp.
+            *pixelStride = 3;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_BLOB:
+        case HAL_PIXEL_FORMAT_RAW10:
+        case HAL_PIXEL_FORMAT_RAW12:
+        case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+            // Blob is used for JPEG data; RAW10 and RAW12 are used for 10-bit and 12-bit
+            // raw data. These are single-plane formats with no pixel stride defined.
+            return AMEDIA_ERROR_UNSUPPORTED;
+        default:
+            ALOGE("Pixel format: 0x%x is unsupported", fmt);
+            return AMEDIA_ERROR_UNSUPPORTED;
+    }
+}
+
+media_status_t
+AImage::getPlaneRowStride(int planeIdx, /*out*/int32_t* rowStride) const {
+    if (planeIdx < 0 || planeIdx >= mNumPlanes) {
+        ALOGE("Error: planeIdx %d out of bound [0,%d]",
+                planeIdx, mNumPlanes - 1);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    if (rowStride == nullptr) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    if (isClosed()) {
+        ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
+        return AMEDIA_ERROR_INVALID_OBJECT;
+    }
+    int32_t fmt = mBuffer->flexFormat;
+    switch (fmt) {
+        case HAL_PIXEL_FORMAT_YCbCr_420_888:
+            *rowStride = (planeIdx == 0) ? mBuffer->stride : mBuffer->chromaStride;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+            *rowStride = mBuffer->width;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_YV12:
+            if (mBuffer->stride % 16) {
+                ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            *rowStride = (planeIdx == 0) ? mBuffer->stride : ALIGN(mBuffer->stride / 2, 16);
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_RAW10:
+        case HAL_PIXEL_FORMAT_RAW12:
+            // RAW10 and RAW12 are used for 10-bit and 12-bit raw data; they are single plane.
+            *rowStride = mBuffer->stride;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_Y8:
+            if (mBuffer->stride % 16) {
+                ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            *rowStride = mBuffer->stride;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_Y16:
+        case HAL_PIXEL_FORMAT_RAW16:
+            // On the native side, strides are specified in pixels, not bytes.
+            // Single plane 16bpp bayer data: even width/height, row stride a
+            // multiple of 16 pixels (32 bytes).
+            if (mBuffer->stride % 16) {
+                ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            *rowStride = mBuffer->stride * 2;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_RGB_565:
+            *rowStride = mBuffer->stride * 2;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_RGBA_8888:
+        case HAL_PIXEL_FORMAT_RGBX_8888:
+            *rowStride = mBuffer->stride * 4;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_RGB_888:
+            // Single plane, 24bpp.
+            *rowStride = mBuffer->stride * 3;
+            return AMEDIA_OK;
+        case HAL_PIXEL_FORMAT_BLOB:
+        case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+            // Blob is used for JPEG/RAW_OPAQUE data. It is single plane and has no
+            // row stride defined.
+            return AMEDIA_ERROR_UNSUPPORTED;
+        default:
+            ALOGE("%s Pixel format: 0x%x is unsupported", __FUNCTION__, fmt);
+            return AMEDIA_ERROR_UNSUPPORTED;
+    }
+}
+
+uint32_t
+AImage::getJpegSize() const {
+    if (mBuffer == nullptr) {
+        LOG_ALWAYS_FATAL("Error: buffer is null");
+    }
+
+    uint32_t size = 0;
+    uint32_t width = mBuffer->width;
+    uint8_t* jpegBuffer = mBuffer->data;
+
+    // First check for JPEG transport header at the end of the buffer
+    uint8_t* header = jpegBuffer + (width - sizeof(struct camera3_jpeg_blob));
+    struct camera3_jpeg_blob* blob = (struct camera3_jpeg_blob*)(header);
+    if (blob->jpeg_blob_id == CAMERA3_JPEG_BLOB_ID) {
+        size = blob->jpeg_size;
+        ALOGV("%s: Jpeg size = %d", __FUNCTION__, size);
+    }
+
+    // failed to find size, default to whole buffer
+    if (size == 0) {
+        /*
+         * This is a problem because not including the JPEG header
+         * means that in certain rare situations a regular JPEG blob
+         * will be misidentified as having a header, in which case
+         * we will get a garbage size value.
+         */
+        ALOGW("%s: No JPEG header detected, defaulting to size=width=%d",
+                __FUNCTION__, width);
+        size = width;
+    }
+
+    return size;
+}
+
+media_status_t
+AImage::getPlaneData(int planeIdx,/*out*/uint8_t** data, /*out*/int* dataLength) const {
+    if (planeIdx < 0 || planeIdx >= mNumPlanes) {
+        ALOGE("Error: planeIdx %d out of bound [0,%d]",
+                planeIdx, mNumPlanes - 1);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    if (data == nullptr || dataLength == nullptr) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    if (isClosed()) {
+        ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
+        return AMEDIA_ERROR_INVALID_OBJECT;
+    }
+
+    uint32_t dataSize, ySize, cSize, cStride;
+    uint8_t* cb = nullptr;
+    uint8_t* cr = nullptr;
+    uint8_t* pData = nullptr;
+    int bytesPerPixel = 0;
+    int32_t fmt = mBuffer->flexFormat;
+
+    switch (fmt) {
+        case HAL_PIXEL_FORMAT_YCbCr_420_888:
+            pData = (planeIdx == 0) ? mBuffer->data :
+                    (planeIdx == 1) ? mBuffer->dataCb : mBuffer->dataCr;
+            // only map until last pixel
+            if (planeIdx == 0) {
+                dataSize = mBuffer->stride * (mBuffer->height - 1) + mBuffer->width;
+            } else {
+                dataSize = mBuffer->chromaStride * (mBuffer->height / 2 - 1) +
+                        mBuffer->chromaStep * (mBuffer->width / 2 - 1) + 1;
+            }
+            break;
+        // NV21
+        case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+            cr = mBuffer->data + (mBuffer->stride * mBuffer->height);
+            cb = cr + 1;
+            // only map until last pixel
+            ySize = mBuffer->width * (mBuffer->height - 1) + mBuffer->width;
+            cSize = mBuffer->width * (mBuffer->height / 2 - 1) + mBuffer->width - 1;
+
+            pData = (planeIdx == 0) ? mBuffer->data :
+                    (planeIdx == 1) ? cb : cr;
+            dataSize = (planeIdx == 0) ? ySize : cSize;
+            break;
+        case HAL_PIXEL_FORMAT_YV12:
+            // Y and C strides need to be 16-pixel aligned.
+            if (mBuffer->stride % 16) {
+                ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+
+            ySize = mBuffer->stride * mBuffer->height;
+            cStride = ALIGN(mBuffer->stride / 2, 16);
+            cr = mBuffer->data + ySize;
+            cSize = cStride * mBuffer->height / 2;
+            cb = cr + cSize;
+
+            pData = (planeIdx == 0) ? mBuffer->data :
+                    (planeIdx == 1) ? cb : cr;
+            dataSize = (planeIdx == 0) ? ySize : cSize;
+            break;
+        case HAL_PIXEL_FORMAT_Y8:
+            // Single plane, 8bpp.
+
+            pData = mBuffer->data;
+            dataSize = mBuffer->stride * mBuffer->height;
+            break;
+        case HAL_PIXEL_FORMAT_Y16:
+            bytesPerPixel = 2;
+
+            pData = mBuffer->data;
+            dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+            break;
+        case HAL_PIXEL_FORMAT_BLOB:
+            // Used for JPEG data, height must be 1, width == size, single plane.
+            if (mBuffer->height != 1) {
+                ALOGE("Jpeg should have height value one but got %d", mBuffer->height);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+
+            pData = mBuffer->data;
+            dataSize = getJpegSize();
+            break;
+        case HAL_PIXEL_FORMAT_RAW16:
+            // Single plane 16bpp bayer data.
+            bytesPerPixel = 2;
+            pData = mBuffer->data;
+            dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+            break;
+        case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+            // Used for RAW_OPAQUE data, height must be 1, width == size, single plane.
+            if (mBuffer->height != 1) {
+                ALOGE("RAW_OPAQUE should have height value one but got %d", mBuffer->height);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            pData = mBuffer->data;
+            dataSize = mBuffer->width;
+            break;
+        case HAL_PIXEL_FORMAT_RAW10:
+            // Single plane 10bpp bayer data.
+            if (mBuffer->width % 4) {
+                ALOGE("Width is not multiple of 4 %d", mBuffer->width);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            if (mBuffer->height % 2) {
+                ALOGE("Height is not multiple of 2 %d", mBuffer->height);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            if (mBuffer->stride < (mBuffer->width * 10 / 8)) {
+                ALOGE("stride (%d) should be at least %d",
+                        mBuffer->stride, mBuffer->width * 10 / 8);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            pData = mBuffer->data;
+            dataSize = mBuffer->stride * mBuffer->height;
+            break;
+        case HAL_PIXEL_FORMAT_RAW12:
+            // Single plane 12bpp bayer data.
+            if (mBuffer->width % 4) {
+                ALOGE("Width is not multiple of 4 %d", mBuffer->width);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            if (mBuffer->height % 2) {
+                ALOGE("Height is not multiple of 2 %d", mBuffer->height);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            if (mBuffer->stride < (mBuffer->width * 12 / 8)) {
+                ALOGE("stride (%d) should be at least %d",
+                        mBuffer->stride, mBuffer->width * 12 / 8);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+            pData = mBuffer->data;
+            dataSize = mBuffer->stride * mBuffer->height;
+            break;
+        case HAL_PIXEL_FORMAT_RGBA_8888:
+        case HAL_PIXEL_FORMAT_RGBX_8888:
+            // Single plane, 32bpp.
+            bytesPerPixel = 4;
+            pData = mBuffer->data;
+            dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+            break;
+        case HAL_PIXEL_FORMAT_RGB_565:
+            // Single plane, 16bpp.
+            bytesPerPixel = 2;
+            pData = mBuffer->data;
+            dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+            break;
+        case HAL_PIXEL_FORMAT_RGB_888:
+            // Single plane, 24bpp.
+            bytesPerPixel = 3;
+            pData = mBuffer->data;
+            dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+            break;
+        default:
+            ALOGE("Pixel format: 0x%x is unsupported", fmt);
+            return AMEDIA_ERROR_UNSUPPORTED;
+    }
+
+    *data = pData;
+    *dataLength = dataSize;
+    return AMEDIA_OK;
+}
+
+EXPORT
+void AImage_delete(AImage* image) {
+    ALOGV("%s", __FUNCTION__);
+    if (image != nullptr) {
+        image->lockReader();
+        image->close();
+        image->unlockReader();
+        if (!image->isClosed()) {
+            LOG_ALWAYS_FATAL("Image close failed!");
+        }
+        image->free();
+    }
+    return;
+}
+
+EXPORT
+media_status_t AImage_getWidth(const AImage* image, /*out*/int32_t* width) {
+    ALOGV("%s", __FUNCTION__);
+    if (image == nullptr || width == nullptr) {
+        ALOGE("%s: bad argument. image %p width %p",
+                __FUNCTION__, image, width);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return image->getWidth(width);
+}
+
+EXPORT
+media_status_t AImage_getHeight(const AImage* image, /*out*/int32_t* height) {
+    ALOGV("%s", __FUNCTION__);
+    if (image == nullptr || height == nullptr) {
+        ALOGE("%s: bad argument. image %p height %p",
+                __FUNCTION__, image, height);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return image->getHeight(height);
+}
+
+EXPORT
+media_status_t AImage_getFormat(const AImage* image, /*out*/int32_t* format) {
+    ALOGV("%s", __FUNCTION__);
+    if (image == nullptr || format == nullptr) {
+        ALOGE("%s: bad argument. image %p format %p",
+                __FUNCTION__, image, format);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return image->getFormat(format);
+}
+
+EXPORT
+media_status_t AImage_getCropRect(const AImage* image, /*out*/AImageCropRect* rect) {
+    ALOGV("%s", __FUNCTION__);
+    if (image == nullptr || rect == nullptr) {
+        ALOGE("%s: bad argument. image %p rect %p",
+                __FUNCTION__, image, rect);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    // For now AImage only supports camera outputs, where cropRect is always the full window.
+    int32_t width = -1;
+    media_status_t ret = image->getWidth(&width);
+    if (ret != AMEDIA_OK) {
+        return ret;
+    }
+    int32_t height = -1;
+    ret = image->getHeight(&height);
+    if (ret != AMEDIA_OK) {
+        return ret;
+    }
+    rect->left = 0;
+    rect->top = 0;
+    rect->right = width;
+    rect->bottom = height;
+    return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AImage_getTimestamp(const AImage* image, /*out*/int64_t* timestampNs) {
+    ALOGV("%s", __FUNCTION__);
+    if (image == nullptr || timestampNs == nullptr) {
+        ALOGE("%s: bad argument. image %p timestampNs %p",
+                __FUNCTION__, image, timestampNs);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return image->getTimestamp(timestampNs);
+}
+
+EXPORT
+media_status_t AImage_getNumberOfPlanes(const AImage* image, /*out*/int32_t* numPlanes) {
+    ALOGV("%s", __FUNCTION__);
+    if (image == nullptr || numPlanes == nullptr) {
+        ALOGE("%s: bad argument. image %p numPlanes %p",
+                __FUNCTION__, image, numPlanes);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return image->getNumPlanes(numPlanes);
+}
+
+EXPORT
+media_status_t AImage_getPlanePixelStride(
+        const AImage* image, int planeIdx, /*out*/int32_t* pixelStride) {
+    ALOGV("%s", __FUNCTION__);
+    if (image == nullptr || pixelStride == nullptr) {
+        ALOGE("%s: bad argument. image %p pixelStride %p",
+                __FUNCTION__, image, pixelStride);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return image->getPlanePixelStride(planeIdx, pixelStride);
+}
+
+EXPORT
+media_status_t AImage_getPlaneRowStride(
+        const AImage* image, int planeIdx, /*out*/int32_t* rowStride) {
+    ALOGV("%s", __FUNCTION__);
+    if (image == nullptr || rowStride == nullptr) {
+        ALOGE("%s: bad argument. image %p rowStride %p",
+                __FUNCTION__, image, rowStride);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return image->getPlaneRowStride(planeIdx, rowStride);
+}
+
+EXPORT
+media_status_t AImage_getPlaneData(
+        const AImage* image, int planeIdx,
+        /*out*/uint8_t** data, /*out*/int* dataLength) {
+    ALOGV("%s", __FUNCTION__);
+    if (image == nullptr || data == nullptr || dataLength == nullptr) {
+        ALOGE("%s: bad argument. image %p data %p dataLength %p",
+                __FUNCTION__, image, data, dataLength);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return image->getPlaneData(planeIdx, data, dataLength);
+}
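
Taken together, these entry points form the consumer side of the new NDK image
API. A minimal usage sketch, assuming image came from
AImageReader_acquireNextImage() and the format check is up to the caller:

    uint8_t* yData = nullptr;
    int yLen = 0;
    int32_t format = 0;
    if (AImage_getFormat(image, &format) == AMEDIA_OK &&
            format == AIMAGE_FORMAT_YUV_420_888 &&
            AImage_getPlaneData(image, /*planeIdx*/0, &yData, &yLen) == AMEDIA_OK) {
        // yData/yLen cover the Y plane up to its last mapped pixel; use
        // AImage_getPlaneRowStride()/AImage_getPlanePixelStride() to walk it.
    }
    AImage_delete(image);  // returns the locked buffer to the reader
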
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
new file mode 100644
index 0000000..89d2b7c
--- /dev/null
+++ b/media/ndk/NdkImagePriv.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NDK_IMAGE_PRIV_H
+#define _NDK_IMAGE_PRIV_H
+
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <utils/StrongPointer.h>
+
+#include <gui/CpuConsumer.h>
+
+#include "NdkImageReaderPriv.h"
+#include "NdkImage.h"
+
+
+using namespace android;
+
+// TODO: this only supports ImageReader
+struct AImage {
+    AImage(AImageReader* reader, int32_t format,
+            CpuConsumer::LockedBuffer* buffer, int64_t timestamp,
+            int32_t width, int32_t height, int32_t numPlanes);
+
+    // Free all resources while keeping the object alive. Caller must hold the reader lock.
+    void close();
+
+    // Release the object's memory. Must be called after close().
+    void free();
+
+    bool isClosed() const;
+
+    // Only for AImage to grab the reader lock.
+    // Always grab the reader lock before grabbing the image lock.
+    void lockReader() const;
+    void unlockReader() const;
+
+    media_status_t getWidth(/*out*/int32_t* width) const;
+    media_status_t getHeight(/*out*/int32_t* height) const;
+    media_status_t getFormat(/*out*/int32_t* format) const;
+    media_status_t getNumPlanes(/*out*/int32_t* numPlanes) const;
+    media_status_t getTimestamp(/*out*/int64_t* timestamp) const;
+
+    media_status_t getPlanePixelStride(int planeIdx, /*out*/int32_t* pixelStride) const;
+    media_status_t getPlaneRowStride(int planeIdx, /*out*/int32_t* rowStride) const;
+    media_status_t getPlaneData(int planeIdx,/*out*/uint8_t** data, /*out*/int* dataLength) const;
+
+  private:
+    // AImage should be deleted through free() API.
+    ~AImage();
+
+    friend struct AImageReader; // for reader to access mBuffer
+
+    uint32_t getJpegSize() const;
+
+    // Once the reader is closed, AImage only accepts the close() call.
+    wp<AImageReader>           mReader;
+    const int32_t              mFormat;
+    CpuConsumer::LockedBuffer* mBuffer;
+    const int64_t              mTimestamp;
+    const int32_t              mWidth;
+    const int32_t              mHeight;
+    const int32_t              mNumPlanes;
+    bool                       mIsClosed = false;
+    mutable Mutex              mLock;
+};
+
+#endif // _NDK_IMAGE_PRIV_H
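
The lock-ordering rule documented here (reader lock before image lock) is what
AImage_delete() in NdkImage.cpp follows; in outline:

    image->lockReader();   // 1) reader lock first, via the weak reference
    image->close();        // 2) takes the image's own mLock internally
    image->unlockReader();
    image->free();         // only legal once the image is closed
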
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
new file mode 100644
index 0000000..30aa7fb
--- /dev/null
+++ b/media/ndk/NdkImageReader.cpp
@@ -0,0 +1,586 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkImageReader"
+
+#include "NdkImagePriv.h"
+#include "NdkImageReaderPriv.h"
+
+#include <utils/Log.h>
+#include <android_runtime/android_view_Surface.h>
+
+using namespace android;
+
+namespace {
+    // Get an ID that's unique within this process.
+    static int32_t createProcessUniqueId() {
+        static volatile int32_t globalCounter = 0;
+        return android_atomic_inc(&globalCounter);
+    }
+}
+
+const char* AImageReader::kCallbackFpKey = "Callback";
+const char* AImageReader::kContextKey    = "Context";
+
+bool
+AImageReader::isSupportedFormat(int32_t format) {
+    switch (format) {
+        case AIMAGE_FORMAT_YUV_420_888:
+        case AIMAGE_FORMAT_JPEG:
+        case AIMAGE_FORMAT_RAW16:
+        case AIMAGE_FORMAT_RAW_PRIVATE:
+        case AIMAGE_FORMAT_RAW10:
+        case AIMAGE_FORMAT_RAW12:
+        case AIMAGE_FORMAT_DEPTH16:
+        case AIMAGE_FORMAT_DEPTH_POINT_CLOUD:
+            return true;
+        default:
+            return false;
+    }
+}
+
+int
+AImageReader::getNumPlanesForFormat(int32_t format) {
+    switch (format) {
+        case AIMAGE_FORMAT_YUV_420_888:
+            return 3;
+        case AIMAGE_FORMAT_JPEG:
+        case AIMAGE_FORMAT_RAW16:
+        case AIMAGE_FORMAT_RAW_PRIVATE:
+        case AIMAGE_FORMAT_RAW10:
+        case AIMAGE_FORMAT_RAW12:
+        case AIMAGE_FORMAT_DEPTH16:
+        case AIMAGE_FORMAT_DEPTH_POINT_CLOUD:
+            return 1;
+        default:
+            return -1;
+    }
+}
+
+void
+AImageReader::FrameListener::onFrameAvailable(const BufferItem& /*item*/) {
+    Mutex::Autolock _l(mLock);
+    sp<AImageReader> reader = mReader.promote();
+    if (reader == nullptr) {
+        ALOGW("A frame is available after AImageReader closed!");
+        return; // reader has been closed
+    }
+    if (mListener.onImageAvailable == nullptr) {
+        return; // No callback registered
+    }
+
+    sp<AMessage> msg = new AMessage(AImageReader::kWhatImageAvailable, reader->mHandler);
+    msg->setPointer(AImageReader::kCallbackFpKey, (void *) mListener.onImageAvailable);
+    msg->setPointer(AImageReader::kContextKey, mListener.context);
+    msg->post();
+}
+
+media_status_t
+AImageReader::FrameListener::setImageListener(AImageReader_ImageListener* listener) {
+    Mutex::Autolock _l(mLock);
+    if (listener == nullptr) {
+        mListener.context = nullptr;
+        mListener.onImageAvailable = nullptr;
+    } else {
+        mListener = *listener;
+    }
+    return AMEDIA_OK;
+}
+
+media_status_t
+AImageReader::setImageListenerLocked(AImageReader_ImageListener* listener) {
+    return mFrameListener->setImageListener(listener);
+}
+
+media_status_t
+AImageReader::setImageListener(AImageReader_ImageListener* listener) {
+    Mutex::Autolock _l(mLock);
+    return setImageListenerLocked(listener);
+}
+
+void AImageReader::CallbackHandler::onMessageReceived(
+        const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatImageAvailable:
+        {
+            AImageReader_ImageCallback onImageAvailable;
+            void* context;
+            bool found = msg->findPointer(kCallbackFpKey, (void**) &onImageAvailable);
+            if (!found || onImageAvailable == nullptr) {
+                ALOGE("%s: Cannot find onImageAvailable callback fp!", __FUNCTION__);
+                return;
+            }
+            found = msg->findPointer(kContextKey, &context);
+            if (!found) {
+                ALOGE("%s: Cannot find callback context!", __FUNCTION__);
+                return;
+            }
+            (*onImageAvailable)(context, mReader);
+            break;
+        }
+        default:
+            ALOGE("%s: unknown message type %d", __FUNCTION__, msg->what());
+            break;
+    }
+}
+
+AImageReader::AImageReader(int32_t width, int32_t height, int32_t format, int32_t maxImages) :
+        mWidth(width), mHeight(height), mFormat(format), mMaxImages(maxImages),
+        mNumPlanes(getNumPlanesForFormat(format)),
+        mFrameListener(new FrameListener(this)) {}
+
+media_status_t
+AImageReader::init() {
+    PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
+    mHalFormat = android_view_Surface_mapPublicFormatToHalFormat(publicFormat);
+    mHalDataSpace = android_view_Surface_mapPublicFormatToHalDataspace(publicFormat);
+
+    sp<IGraphicBufferProducer> gbProducer;
+    sp<IGraphicBufferConsumer> gbConsumer;
+    BufferQueue::createBufferQueue(&gbProducer, &gbConsumer);
+
+    sp<CpuConsumer> cpuConsumer;
+    String8 consumerName = String8::format("ImageReader-%dx%df%xm%d-%d-%d",
+            mWidth, mHeight, mFormat, mMaxImages, getpid(),
+            createProcessUniqueId());
+
+    cpuConsumer = new CpuConsumer(gbConsumer, mMaxImages, /*controlledByApp*/true);
+    if (cpuConsumer == nullptr) {
+        ALOGE("Failed to allocate CpuConsumer");
+        return AMEDIA_ERROR_UNKNOWN;
+    }
+
+    mCpuConsumer = cpuConsumer;
+    mCpuConsumer->setName(consumerName);
+    mProducer = gbProducer;
+
+    sp<ConsumerBase> consumer = cpuConsumer;
+    consumer->setFrameAvailableListener(mFrameListener);
+
+    status_t res;
+    res = cpuConsumer->setDefaultBufferSize(mWidth, mHeight);
+    if (res != OK) {
+        ALOGE("Failed to set CpuConsumer buffer size");
+        return AMEDIA_ERROR_UNKNOWN;
+    }
+    res = cpuConsumer->setDefaultBufferFormat(mHalFormat);
+    if (res != OK) {
+        ALOGE("Failed to set CpuConsumer buffer format");
+        return AMEDIA_ERROR_UNKNOWN;
+    }
+    res = cpuConsumer->setDefaultBufferDataSpace(mHalDataSpace);
+    if (res != OK) {
+        ALOGE("Failed to set CpuConsumer buffer dataSpace");
+        return AMEDIA_ERROR_UNKNOWN;
+    }
+
+    mSurface = new Surface(mProducer, /*controlledByApp*/true);
+    if (mSurface == nullptr) {
+        ALOGE("Failed to create surface");
+        return AMEDIA_ERROR_UNKNOWN;
+    }
+    mWindow = static_cast<ANativeWindow*>(mSurface.get());
+
+    for (int i = 0; i < mMaxImages; i++) {
+        CpuConsumer::LockedBuffer* buffer = new CpuConsumer::LockedBuffer;
+        mBuffers.push_back(buffer);
+    }
+
+    mCbLooper = new ALooper;
+    mCbLooper->setName(consumerName.string());
+    res = mCbLooper->start(
+            /*runOnCallingThread*/false,
+            /*canCallJava*/       true,
+            PRIORITY_DEFAULT);
+    if (res != OK) {
+        ALOGE("Failed to start the looper");
+        return AMEDIA_ERROR_UNKNOWN;
+    }
+    mHandler = new CallbackHandler(this);
+    mCbLooper->registerHandler(mHandler);
+
+    return AMEDIA_OK;
+}
+
+AImageReader::~AImageReader() {
+    Mutex::Autolock _l(mLock);
+    AImageReader_ImageListener nullListener = {nullptr, nullptr};
+    setImageListenerLocked(&nullListener);
+
+    if (mCbLooper != nullptr) {
+        mCbLooper->unregisterHandler(mHandler->id());
+        mCbLooper->stop();
+    }
+    mCbLooper.clear();
+    mHandler.clear();
+
+    // Close all previously acquired images
+    for (auto it = mAcquiredImages.begin();
+              it != mAcquiredImages.end(); it++) {
+        AImage* image = *it;
+        image->close();
+    }
+
+    // Delete LockedBuffers
+    for (auto it = mBuffers.begin();
+              it != mBuffers.end(); it++) {
+        delete *it;
+    }
+
+    if (mCpuConsumer != nullptr) {
+        mCpuConsumer->abandon();
+        mCpuConsumer->setFrameAvailableListener(nullptr);
+    }
+}
+
+media_status_t
+AImageReader::acquireCpuConsumerImageLocked(/*out*/AImage** image) {
+    *image = nullptr;
+    CpuConsumer::LockedBuffer* buffer = getLockedBufferLocked();
+    if (buffer == nullptr) {
+        ALOGW("Unable to acquire a lockedBuffer, very likely client tries to lock more than"
+            " maxImages buffers");
+        return AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED;
+    }
+
+    status_t res = mCpuConsumer->lockNextBuffer(buffer);
+    if (res != NO_ERROR) {
+        returnLockedBufferLocked(buffer);
+        if (res != BAD_VALUE /*no buffers*/) {
+            if (res == NOT_ENOUGH_DATA) {
+                return AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED;
+            } else {
+                ALOGE("%s Fail to lockNextBuffer with error: %d ",
+                      __FUNCTION__, res);
+                return AMEDIA_ERROR_UNKNOWN;
+            }
+        }
+        return AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE;
+    }
+
+    if (buffer->flexFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
+        ALOGE("NV21 format is not supported by AImageReader");
+        return AMEDIA_ERROR_UNSUPPORTED;
+    }
+
+    // Check that the top-left corner of the crop rect is at the origin. We currently
+    // assume it is zero; revisit this if the assumption ever turns out to be problematic.
+    Point lt = buffer->crop.leftTop();
+    if (lt.x != 0 || lt.y != 0) {
+        ALOGE("crop left top corner [%d, %d] need to be at origin", lt.x, lt.y);
+        return AMEDIA_ERROR_UNKNOWN;
+    }
+
+    // Check if the producer buffer configurations match what ImageReader configured.
+    int outputWidth = getBufferWidth(buffer);
+    int outputHeight = getBufferHeight(buffer);
+
+    int readerFmt = mHalFormat;
+    int readerWidth = mWidth;
+    int readerHeight = mHeight;
+
+    if ((buffer->format != HAL_PIXEL_FORMAT_BLOB) && (readerFmt != HAL_PIXEL_FORMAT_BLOB) &&
+            (readerWidth != outputWidth || readerHeight != outputHeight)) {
+        ALOGW("%s: Producer buffer size: %dx%d, doesn't match AImageReader configured size: %dx%d",
+                __FUNCTION__, outputWidth, outputHeight, readerWidth, readerHeight);
+    }
+
+    int bufFmt = buffer->format;
+    if (readerFmt == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+        bufFmt = buffer->flexFormat;
+    }
+
+    if (readerFmt != bufFmt) {
+        if (readerFmt == HAL_PIXEL_FORMAT_YCbCr_420_888 && (bufFmt ==
+                HAL_PIXEL_FORMAT_YCrCb_420_SP || bufFmt == HAL_PIXEL_FORMAT_YV12)) {
+            // Special casing for when producer switches to a format compatible with flexible YUV
+            // (HAL_PIXEL_FORMAT_YCbCr_420_888).
+            mHalFormat = bufFmt;
+            ALOGD("%s: Overriding buffer format YUV_420_888 to %x.", __FUNCTION__, bufFmt);
+        } else {
+            // Return the buffer to the queue.
+            mCpuConsumer->unlockBuffer(*buffer);
+            returnLockedBufferLocked(buffer);
+
+            ALOGE("Producer output buffer format: 0x%x, ImageReader configured format: 0x%x",
+                    buffer->format, readerFmt);
+
+            return AMEDIA_ERROR_UNKNOWN;
+        }
+    }
+
+    if (mHalFormat == HAL_PIXEL_FORMAT_BLOB) {
+        *image = new AImage(this, mFormat, buffer, buffer->timestamp,
+                            readerWidth, readerHeight, mNumPlanes);
+    } else {
+        *image = new AImage(this, mFormat, buffer, buffer->timestamp,
+                            outputWidth, outputHeight, mNumPlanes);
+    }
+    mAcquiredImages.push_back(*image);
+    return AMEDIA_OK;
+}
+
+CpuConsumer::LockedBuffer*
+AImageReader::getLockedBufferLocked() {
+    if (mBuffers.empty()) {
+        return nullptr;
+    }
+    // Return a LockedBuffer pointer and remove it from the list
+    auto it = mBuffers.begin();
+    CpuConsumer::LockedBuffer* buffer = *it;
+    mBuffers.erase(it);
+    return buffer;
+}
+
+void
+AImageReader::returnLockedBufferLocked(CpuConsumer::LockedBuffer* buffer) {
+    mBuffers.push_back(buffer);
+}
+
+void
+AImageReader::releaseImageLocked(AImage* image) {
+    CpuConsumer::LockedBuffer* buffer = image->mBuffer;
+    if (buffer == nullptr) {
+        // This should not happen, but is not fatal
+        ALOGW("AImage %p has no buffer!", image);
+        return;
+    }
+
+    mCpuConsumer->unlockBuffer(*buffer);
+    returnLockedBufferLocked(buffer);
+    image->mBuffer = nullptr;
+
+    bool found = false;
+    // cleanup acquired image list
+    for (auto it = mAcquiredImages.begin();
+              it != mAcquiredImages.end(); it++) {
+        AImage* readerCopy = *it;
+        if (readerCopy == image) {
+            found = true;
+            mAcquiredImages.erase(it);
+            break;
+        }
+    }
+    if (!found) {
+        ALOGE("Error: AImage %p is not generated by AImageReader %p",
+                image, this);
+    }
+}
+
+int
+AImageReader::getBufferWidth(CpuConsumer::LockedBuffer* buffer) {
+    if (buffer == nullptr) return -1;
+
+    if (!buffer->crop.isEmpty()) {
+        return buffer->crop.getWidth();
+    }
+    return buffer->width;
+}
+
+int
+AImageReader::getBufferHeight(CpuConsumer::LockedBuffer* buffer) {
+    if (buffer == nullptr) return -1;
+
+    if (!buffer->crop.isEmpty()) {
+        return buffer->crop.getHeight();
+    }
+    return buffer->height;
+}
+
+media_status_t
+AImageReader::acquireNextImage(/*out*/AImage** image) {
+    Mutex::Autolock _l(mLock);
+    return acquireCpuConsumerImageLocked(image);
+}
+
+media_status_t
+AImageReader::acquireLatestImage(/*out*/AImage** image) {
+    if (image == nullptr) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    Mutex::Autolock _l(mLock);
+    *image = nullptr;
+    AImage* prevImage = nullptr;
+    AImage* nextImage = nullptr;
+    media_status_t ret = acquireCpuConsumerImageLocked(&prevImage);
+    if (prevImage == nullptr) {
+        return ret;
+    }
+    for (;;) {
+        ret = acquireCpuConsumerImageLocked(&nextImage);
+        if (nextImage == nullptr) {
+            *image = prevImage;
+            return AMEDIA_OK;
+        }
+        prevImage->close();
+        prevImage->free();
+        prevImage = nextImage;
+        nextImage = nullptr;
+    }
+}
+
+EXPORT
+media_status_t AImageReader_new(
+        int32_t width, int32_t height, int32_t format, int32_t maxImages,
+        /*out*/AImageReader** reader) {
+    ALOGV("%s", __FUNCTION__);
+
+    if (width < 1 || height < 1) {
+        ALOGE("%s: image dimension must be positive: w:%d h:%d",
+                __FUNCTION__, width, height);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (maxImages < 1) {
+        ALOGE("%s: max outstanding image count must be at least 1 (%d)",
+                __FUNCTION__, maxImages);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (!AImageReader::isSupportedFormat(format)) {
+        ALOGE("%s: format %d is not supported by AImageReader",
+                __FUNCTION__, format);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    if (reader == nullptr) {
+        ALOGE("%s: reader argument is null", __FUNCTION__);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    AImageReader* tmpReader = new AImageReader(width, height, format, maxImages);
+    if (tmpReader == nullptr) {
+        ALOGE("%s: AImageReader allocation failed", __FUNCTION__);
+        return AMEDIA_ERROR_UNKNOWN;
+    }
+    media_status_t ret = tmpReader->init();
+    if (ret != AMEDIA_OK) {
+        ALOGE("%s: AImageReader initialization failed!", __FUNCTION__);
+        delete tmpReader;
+        return ret;
+    }
+    *reader = tmpReader;
+    (*reader)->incStrong((void*) AImageReader_new);
+    return AMEDIA_OK;
+}
+
+EXPORT
+void AImageReader_delete(AImageReader* reader) {
+    ALOGV("%s", __FUNCTION__);
+    if (reader != nullptr) {
+        reader->decStrong((void*) AImageReader_delete);
+    }
+    return;
+}
+
+EXPORT
+media_status_t AImageReader_getWindow(AImageReader* reader, /*out*/ANativeWindow** window) {
+    ALOGE("%s", __FUNCTION__);
+    if (reader == nullptr || window == nullptr) {
+        ALOGE("%s: invalid argument. reader %p, window %p",
+                __FUNCTION__, reader, window);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *window = reader->getWindow();
+    return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AImageReader_getWidth(const AImageReader* reader, /*out*/int32_t* width) {
+    ALOGV("%s", __FUNCTION__);
+    if (reader == nullptr || width == nullptr) {
+        ALOGE("%s: invalid argument. reader %p, width %p",
+                __FUNCTION__, reader, width);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *width = reader->getWidth();
+    return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AImageReader_getHeight(const AImageReader* reader, /*out*/int32_t* height) {
+    ALOGV("%s", __FUNCTION__);
+    if (reader == nullptr || height == nullptr) {
+        ALOGE("%s: invalid argument. reader %p, height %p",
+                __FUNCTION__, reader, height);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *height = reader->getHeight();
+    return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AImageReader_getFormat(const AImageReader* reader, /*out*/int32_t* format) {
+    ALOGV("%s", __FUNCTION__);
+    if (reader == nullptr || format == nullptr) {
+        ALOGE("%s: invalid argument. reader %p, format %p",
+                __FUNCTION__, reader, format);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *format = reader->getFormat();
+    return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AImageReader_getMaxImages(const AImageReader* reader, /*out*/int32_t* maxImages) {
+    ALOGV("%s", __FUNCTION__);
+    if (reader == nullptr || maxImages == nullptr) {
+        ALOGE("%s: invalid argument. reader %p, maxImages %p",
+                __FUNCTION__, reader, maxImages);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    *maxImages = reader->getMaxImages();
+    return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AImageReader_acquireNextImage(AImageReader* reader, /*out*/AImage** image) {
+    ALOGV("%s", __FUNCTION__);
+    if (reader == nullptr || image == nullptr) {
+        ALOGE("%s: invalid argument. reader %p, maxImages %p",
+                __FUNCTION__, reader, image);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return reader->acquireNextImage(image);
+}
+
+EXPORT
+media_status_t AImageReader_acquireLatestImage(AImageReader* reader, /*out*/AImage** image) {
+    ALOGV("%s", __FUNCTION__);
+    if (reader == nullptr || image == nullptr) {
+        ALOGE("%s: invalid argument. reader %p, maxImages %p",
+                __FUNCTION__, reader, image);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    return reader->acquireLatestImage(image);
+}
+
+EXPORT
+media_status_t AImageReader_setImageListener(
+        AImageReader* reader, AImageReader_ImageListener* listener) {
+    ALOGV("%s", __FUNCTION__);
+    if (reader == nullptr) {
+        ALOGE("%s: invalid argument! reader %p", __FUNCTION__, reader);
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    reader->setImageListener(listener);
+    return AMEDIA_OK;
+}
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
new file mode 100644
index 0000000..48f0953
--- /dev/null
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NDK_IMAGE_READER_PRIV_H
+#define _NDK_IMAGE_READER_PRIV_H
+
+#include <inttypes.h>
+
+#include "NdkImageReader.h"
+
+#include <utils/List.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+
+#include <gui/CpuConsumer.h>
+#include <gui/Surface.h>
+
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+using namespace android;
+
+namespace {
+    enum {
+        IMAGE_READER_MAX_NUM_PLANES = 3,
+    };
+
+    enum {
+        ACQUIRE_SUCCESS = 0,
+        ACQUIRE_NO_BUFFERS = 1,
+        ACQUIRE_MAX_IMAGES = 2,
+    };
+}
+
+struct AImageReader : public RefBase {
+  public:
+
+    static bool isSupportedFormat(int32_t format);
+    static int getNumPlanesForFormat(int32_t format);
+
+    AImageReader(int32_t width, int32_t height, int32_t format, int32_t maxImages);
+    ~AImageReader();
+
+    // Initialize the AImageReader. An uninitialized AImageReader, or one whose
+    // initialization failed, must never be passed to the application.
+    media_status_t init();
+
+    media_status_t setImageListener(AImageReader_ImageListener* listener);
+
+    media_status_t acquireNextImage(/*out*/AImage** image);
+    media_status_t acquireLatestImage(/*out*/AImage** image);
+
+    ANativeWindow* getWindow()    const { return mWindow.get(); };
+    int32_t        getWidth()     const { return mWidth; };
+    int32_t        getHeight()    const { return mHeight; };
+    int32_t        getFormat()    const { return mFormat; };
+    int32_t        getMaxImages() const { return mMaxImages; };
+
+
+  private:
+
+    friend struct AImage; // for grabbing the reader lock
+
+    media_status_t acquireCpuConsumerImageLocked(/*out*/AImage** image);
+    CpuConsumer::LockedBuffer* getLockedBufferLocked();
+    void returnLockedBufferLocked(CpuConsumer::LockedBuffer* buffer);
+
+    // Called by AImage to close image
+    void releaseImageLocked(AImage* image);
+
+    static int getBufferWidth(CpuConsumer::LockedBuffer* buffer);
+    static int getBufferHeight(CpuConsumer::LockedBuffer* buffer);
+
+    media_status_t setImageListenerLocked(AImageReader_ImageListener* listener);
+
+    // definition of handler and message
+    enum {
+        kWhatImageAvailable
+    };
+    static const char* kCallbackFpKey;
+    static const char* kContextKey;
+    class CallbackHandler : public AHandler {
+      public:
+        CallbackHandler(AImageReader* reader) : mReader(reader) {}
+        void onMessageReceived(const sp<AMessage> &msg) override;
+      private:
+        AImageReader* mReader;
+    };
+    sp<CallbackHandler> mHandler;
+    sp<ALooper>         mCbLooper; // Looper thread where callbacks actually happen on
+
+    List<CpuConsumer::LockedBuffer*> mBuffers;
+    const int32_t mWidth;
+    const int32_t mHeight;
+    const int32_t mFormat;
+    const int32_t mMaxImages;
+    const int32_t mNumPlanes;
+
+    struct FrameListener : public ConsumerBase::FrameAvailableListener {
+      public:
+        FrameListener(AImageReader* parent) : mReader(parent) {}
+
+        void onFrameAvailable(const BufferItem& item) override;
+
+        media_status_t setImageListener(AImageReader_ImageListener* listener);
+
+      private:
+        AImageReader_ImageListener mListener = {nullptr, nullptr};
+        wp<AImageReader>           mReader;
+        Mutex                      mLock;
+    };
+    sp<FrameListener> mFrameListener;
+
+    int mHalFormat;
+    android_dataspace mHalDataSpace;
+
+    sp<IGraphicBufferProducer> mProducer;
+    sp<Surface>                mSurface;
+    sp<CpuConsumer>            mCpuConsumer;
+    sp<ANativeWindow>          mWindow;
+
+    List<AImage*>              mAcquiredImages;
+
+    Mutex                      mLock;
+};
+
+#endif // _NDK_IMAGE_READER_PRIV_H
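
AImageReader derives from RefBase so the handle returned to the application is
a counted strong reference, while FrameListener and each AImage hold only weak
ones. AImageReader_new()/AImageReader_delete() in NdkImageReader.cpp keep the
count balanced with incStrong()/decStrong(); anything holding a wp<> must
promote before use, as FrameListener::onFrameAvailable() does:

    sp<AImageReader> reader = mReader.promote();
    if (reader == nullptr) {
        return;  // reader has already been destroyed
    }
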
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index cd0c462..50b490d 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -145,10 +145,15 @@
     AMediaCodec *mData = new AMediaCodec();
     mData->mLooper = new ALooper;
     mData->mLooper->setName("NDK MediaCodec_looper");
-    status_t ret = mData->mLooper->start(
+    status_t res = mData->mLooper->start(
             false,      // runOnCallingThread
             true,       // canCallJava XXX
             PRIORITY_FOREGROUND);
+    if (res != OK) {
+        ALOGE("Failed to start the looper");
+        AMediaCodec_delete(mData);
+        return NULL;
+    }
     if (name_is_type) {
         mData->mCodec = android::MediaCodec::CreateByType(mData->mLooper, name, encoder);
     } else {
@@ -359,6 +364,15 @@
     return translate_error(mData->mCodec->renderOutputBufferAndRelease(idx, timestampNs));
 }
 
+EXPORT
+media_status_t AMediaCodec_setOutputSurface(AMediaCodec *mData, ANativeWindow* window) {
+    sp<Surface> surface = NULL;
+    if (window != NULL) {
+        surface = (Surface*) window;
+    }
+    return translate_error(mData->mCodec->setSurface(surface));
+}
+
 //EXPORT
 media_status_t AMediaCodec_setNotificationCallback(AMediaCodec *mData, OnCodecEvent callback,
         void *userdata) {
@@ -372,6 +386,7 @@
         uint8_t key[16];
         uint8_t iv[16];
         cryptoinfo_mode_t mode;
+        cryptoinfo_pattern_t pattern;
         size_t *clearbytes;
         size_t *encryptedbytes;
 } AMediaCodecCryptoInfo;
@@ -391,6 +406,10 @@
         subSamples[i].mNumBytesOfEncryptedData = crypto->encryptedbytes[i];
     }
 
+    CryptoPlugin::Pattern pattern;
+    pattern.mEncryptBlocks = crypto->pattern.encryptBlocks;
+    pattern.mSkipBlocks = crypto->pattern.skipBlocks;
+
     AString errormsg;
     status_t err  = codec->mCodec->queueSecureInputBuffer(idx,
             offset,
@@ -398,7 +417,8 @@
             crypto->numsubsamples,
             crypto->key,
             crypto->iv,
-            (CryptoPlugin::Mode) crypto->mode,
+            (CryptoPlugin::Mode)crypto->mode,
+            pattern,
             time,
             flags,
             &errormsg);
@@ -410,6 +430,12 @@
 }
 
 
+EXPORT
+void AMediaCodecCryptoInfo_setPattern(AMediaCodecCryptoInfo *info,
+        cryptoinfo_pattern_t *pattern) {
+    info->pattern.encryptBlocks = pattern->encryptBlocks;
+    info->pattern.skipBlocks = pattern->skipBlocks;
+}
 
 EXPORT
 AMediaCodecCryptoInfo *AMediaCodecCryptoInfo_new(
@@ -431,6 +457,8 @@
     memcpy(ret->key, key, 16);
     memcpy(ret->iv, iv, 16);
     ret->mode = mode;
+    ret->pattern.encryptBlocks = 0;
+    ret->pattern.skipBlocks = 0;
 
     // clearbytes and encryptedbytes point at the actual data, which follows
     ret->clearbytes = (size_t*) (ret + 1); // point immediately after the struct
diff --git a/media/ndk/NdkMediaCrypto.cpp b/media/ndk/NdkMediaCrypto.cpp
index 1cc2f1a..32aabdd 100644
--- a/media/ndk/NdkMediaCrypto.cpp
+++ b/media/ndk/NdkMediaCrypto.cpp
@@ -23,11 +23,12 @@
 #include "NdkMediaFormatPriv.h"
 
 
+#include <cutils/properties.h>
 #include <utils/Log.h>
 #include <utils/StrongPointer.h>
 #include <binder/IServiceManager.h>
 #include <media/ICrypto.h>
-#include <media/IMediaPlayerService.h>
+#include <media/IMediaDrmService.h>
 #include <android_runtime/AndroidRuntime.h>
 #include <android_util_Binder.h>
 
@@ -35,34 +36,19 @@
 
 using namespace android;
 
-static media_status_t translate_error(status_t err) {
-    if (err == OK) {
-        return AMEDIA_OK;
-    }
-    ALOGE("sf error code: %d", err);
-    return AMEDIA_ERROR_UNKNOWN;
-}
-
-
 static sp<ICrypto> makeCrypto() {
     sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.drm"));
 
-    sp<IBinder> binder =
-        sm->getService(String16("media.player"));
-
-    sp<IMediaPlayerService> service =
-        interface_cast<IMediaPlayerService>(binder);
-
+    sp<IMediaDrmService> service = interface_cast<IMediaDrmService>(binder);
     if (service == NULL) {
         return NULL;
     }
 
     sp<ICrypto> crypto = service->makeCrypto();
-
     if (crypto == NULL || (crypto->initCheck() != OK && crypto->initCheck() != NO_INIT)) {
         return NULL;
     }
-
     return crypto;
 }
 
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 83a5ba1..be71f43 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -19,6 +19,7 @@
 
 #include "NdkMediaDrm.h"
 
+#include <cutils/properties.h>
 #include <utils/Log.h>
 #include <utils/StrongPointer.h>
 #include <gui/Surface.h>
@@ -27,7 +28,7 @@
 #include <media/IDrmClient.h>
 #include <media/stagefright/MediaErrors.h>
 #include <binder/IServiceManager.h>
-#include <media/IMediaPlayerService.h>
+#include <media/IMediaDrmService.h>
 #include <ndk/NdkMediaCrypto.h>
 
 
@@ -148,23 +149,17 @@
 
 static sp<IDrm> CreateDrm() {
     sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.drm"));
 
-    sp<IBinder> binder =
-        sm->getService(String16("media.player"));
-
-    sp<IMediaPlayerService> service =
-        interface_cast<IMediaPlayerService>(binder);
-
+    sp<IMediaDrmService> service = interface_cast<IMediaDrmService>(binder);
     if (service == NULL) {
         return NULL;
     }
 
     sp<IDrm> drm = service->makeDrm();
-
     if (drm == NULL || (drm->initCheck() != OK && drm->initCheck() != NO_INIT)) {
         return NULL;
     }
-
     return drm;
 }
 
@@ -616,9 +611,9 @@
 
     Vector<uint8_t> outputVec;
     if (encrypt) {
-        status_t status = mObj->mDrm->encrypt(*iter, keyIdVec, inputVec, ivVec, outputVec);
+        status = mObj->mDrm->encrypt(*iter, keyIdVec, inputVec, ivVec, outputVec);
     } else {
-        status_t status = mObj->mDrm->decrypt(*iter, keyIdVec, inputVec, ivVec, outputVec);
+        status = mObj->mDrm->decrypt(*iter, keyIdVec, inputVec, ivVec, outputVec);
     }
     if (status == OK) {
         memcpy(output, outputVec.array(), outputVec.size());
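
The encrypt/decrypt hunk above fixes a classic shadowing bug; in miniature
(doWork() is hypothetical):

    status_t status = UNKNOWN_ERROR;
    if (encrypt) {
        status_t status = doWork();  // BUG: shadows and discards the result
    }
    if (status == OK) { /* never true, even when doWork() succeeded */ }
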
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 0ecd64f..1118959 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -23,6 +23,7 @@
 #include "NdkMediaFormatPriv.h"
 
 
+#include <inttypes.h>
 #include <utils/Log.h>
 #include <utils/StrongPointer.h>
 #include <media/hardware/CryptoAPI.h>
@@ -72,7 +73,7 @@
 EXPORT
 media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor *mData, int fd, off64_t offset,
         off64_t length) {
-    ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
+    ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
     return translate_error(mData->mImpl->setDataSource(fd, offset, length));
 }
 
@@ -243,15 +244,27 @@
     while (len > 0) {
         numentries++;
 
+        if (len < 16) {
+            ALOGE("invalid PSSH data");
+            return NULL;
+        }
         // skip uuid
         data += 16;
         len -= 16;
 
         // get data length
+        if (len < 4) {
+            ALOGE("invalid PSSH data");
+            return NULL;
+        }
         uint32_t datalen = *((uint32_t*)data);
         data += 4;
         len -= 4;
 
+        if (len < datalen) {
+            ALOGE("invalid PSSH data");
+            return NULL;
+        }
         // skip the data
         data += datalen;
         len -= datalen;
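
Each PSSH entry is a 16-byte UUID, a 4-byte length, then that many payload bytes; the added guards ensure the remaining length covers each field before it is consumed. The same walk as a hedged standalone sketch (simplified: it uses memcpy for the length read where the code above casts the pointer directly):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Validate a packed blob of [16-byte uuid][u32 len][len bytes] entries,
    // failing as soon as any field would run past the end of the buffer.
    static bool validatePssh(const uint8_t *data, size_t len, size_t *numEntries) {
        size_t n = 0;
        while (len > 0) {
            if (len < 16) return false;             // truncated uuid
            data += 16; len -= 16;
            if (len < 4) return false;              // truncated length field
            uint32_t datalen;
            memcpy(&datalen, data, sizeof(datalen));
            data += 4; len -= 4;
            if (len < datalen) return false;        // truncated payload
            data += datalen; len -= datalen;
            n++;
        }
        *numEntries = n;
        return true;
    }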
@@ -265,6 +278,10 @@
     // extra pointer for each entry, and an extra size_t for the entire PsshInfo.
     size_t newsize = buffer->size() - (sizeof(uint32_t) * numentries) + sizeof(size_t)
             + ((sizeof(void*) + sizeof(size_t)) * numentries);
+    if (newsize <= buffer->size()) {
+        ALOGE("invalid PSSH data");
+        return NULL;
+    }
     ex->mPsshBuf = new ABuffer(newsize);
     ex->mPsshBuf->setRange(0, newsize);
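
The newsize guard catches unsigned wraparound: a hostile length can push the arithmetic past SIZE_MAX, yielding a newsize smaller than the input and therefore an undersized mPsshBuf. Because each entry legitimately grows the buffer, newsize <= buffer->size() can only mean overflow. The idiom in general form:

    #include <cstddef>

    // Grow a size, reporting unsigned overflow: if the sum is smaller
    // than the base, the addition wrapped around.
    static bool growSize(size_t base, size_t extra, size_t *out) {
        size_t grown = base + extra;
        if (grown < base) {
            return false;  // wrapped past SIZE_MAX
        }
        *out = grown;
        return true;
    }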
 
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index a354d58..5598d5d 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -46,6 +46,10 @@
     ALOGV("private ctor");
     AMediaFormat* mData = new AMediaFormat();
     mData->mFormat = *((sp<AMessage>*)data);
+    if (mData->mFormat == NULL) {
+        ALOGW("got NULL format");
+        mData->mFormat = new AMessage;
+    }
     return mData;
 }
 
diff --git a/media/utils/BatteryNotifier.cpp b/media/utils/BatteryNotifier.cpp
index 7f9cd7a..341d391 100644
--- a/media/utils/BatteryNotifier.cpp
+++ b/media/utils/BatteryNotifier.cpp
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "BatteryNotifier"
+//#define LOG_NDEBUG 0
+
 #include "include/mediautils/BatteryNotifier.h"
 
 #include <binder/IServiceManager.h>
@@ -64,7 +67,7 @@
     sp<IBatteryStats> batteryService = getBatteryService_l();
     mVideoRefCount = 0;
     if (batteryService != nullptr) {
-        batteryService->noteResetAudio();
+        batteryService->noteResetVideo();
     }
 }
 
@@ -72,7 +75,7 @@
     Mutex::Autolock _l(mLock);
     sp<IBatteryStats> batteryService = getBatteryService_l();
     if (mAudioRefCount == 0 && batteryService != nullptr) {
-        batteryService->noteStartAudio(AID_MEDIA);
+        batteryService->noteStartAudio(AID_AUDIOSERVER);
     }
     mAudioRefCount++;
 }
@@ -88,7 +91,7 @@
 
     mAudioRefCount--;
     if (mAudioRefCount == 0 && batteryService != nullptr) {
-        batteryService->noteStopAudio(AID_MEDIA);
+        batteryService->noteStopAudio(AID_AUDIOSERVER);
     }
 }
 
@@ -190,20 +193,25 @@
         const String16 name("batterystats");
         mBatteryStatService = interface_cast<IBatteryStats>(sm->checkService(name));
         if (mBatteryStatService == nullptr) {
-            ALOGE("batterystats service unavailable!");
+            // this may occur normally during the init sequence as mediaserver
+            // and audioserver start before the batterystats service is available.
+            ALOGW("batterystats service unavailable!");
             return nullptr;
         }
 
         mDeathNotifier = new DeathNotifier();
         IInterface::asBinder(mBatteryStatService)->linkToDeath(mDeathNotifier);
 
-        // Notify start now if media already started
+        // Notify start now if mediaserver or audioserver has already started.
+        // 1) mediaserver and audioserver are started before the batterystats service
+        // 2) the batterystats service may have crashed.
         if (mVideoRefCount > 0) {
             mBatteryStatService->noteStartVideo(AID_MEDIA);
         }
         if (mAudioRefCount > 0) {
-            mBatteryStatService->noteStartAudio(AID_MEDIA);
+            mBatteryStatService->noteStartAudio(AID_AUDIOSERVER);
         }
+        // TODO: Notify for camera and flashlight state as well?
     }
     return mBatteryStatService;
 }
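
BatteryNotifier keeps a per-category reference count so batterystats only sees edge transitions (first start, last stop), and getBatteryService_l() replays the current state when the service appears late or returns after a crash. Expected client usage, sketched against the no-argument API this patch's callers use:

    #include <mediautils/BatteryNotifier.h>

    void onAudioStreamOpened() {
        // First caller emits noteStartAudio(AID_AUDIOSERVER) to batterystats;
        // subsequent callers only increment the refcount.
        android::BatteryNotifier::getInstance().noteStartAudio();
    }

    void onAudioStreamClosed() {
        // Last caller emits noteStopAudio(AID_AUDIOSERVER).
        android::BatteryNotifier::getInstance().noteStopAudio();
    }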
diff --git a/radio/Android.mk b/radio/Android.mk
index ecbb8fd..0377328 100644
--- a/radio/Android.mk
+++ b/radio/Android.mk
@@ -36,4 +36,6 @@
 
 LOCAL_MODULE:= libradio
 
+LOCAL_CFLAGS := -Werror -Wall
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/radio/IRadioService.cpp b/radio/IRadioService.cpp
index 8c2b3ef..81acf9e 100644
--- a/radio/IRadioService.cpp
+++ b/radio/IRadioService.cpp
@@ -87,7 +87,8 @@
         data.writeInterfaceToken(IRadioService::getInterfaceDescriptor());
         data.writeInt32(handle);
         data.writeStrongBinder(IInterface::asBinder(client));
-        ALOGV("attach() config %p withAudio %d region %d type %d", config, withAudio, config->region, config->band.type);
+        ALOGV("attach() config %p withAudio %d region %d type %d",
+              config == NULL ? 0 : config, withAudio, config->region, config->band.type);
         if (config == NULL) {
             data.writeInt32(0);
         } else {
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 9b4ba79..4f826e5 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -34,6 +34,7 @@
 LOCAL_C_INCLUDES := \
     $(TOPDIR)frameworks/av/services/audiopolicy \
     $(TOPDIR)external/sonic \
+    libcore/include \
     $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
 
@@ -41,12 +42,12 @@
     libaudioresampler \
     libaudiospdif \
     libaudioutils \
-    libcommon_time_client \
     libcutils \
     libutils \
     liblog \
     libbinder \
     libmedia \
+    libmediautils \
     libnbaio \
     libhardware \
     libhardware_legacy \
@@ -54,14 +55,16 @@
     libpowermanager \
     libserviceutility \
     libsonic \
-    libmediautils
+    libmediautils \
+    libmemunreachable
 
 LOCAL_STATIC_LIBRARIES := \
     libcpustats \
     libmedia_helper
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
 LOCAL_MODULE:= libaudioflinger
-LOCAL_32_BIT_ONLY := true
 
 LOCAL_SRC_FILES += \
     AudioWatchdog.cpp        \
@@ -79,6 +82,8 @@
 
 LOCAL_CFLAGS += -fvisibility=hidden
 
+LOCAL_CFLAGS += -Werror -Wall
+
 include $(BUILD_SHARED_LIBRARY)
 
 #
@@ -107,6 +112,8 @@
 
 LOCAL_MODULE_TAGS := optional
 
+LOCAL_CFLAGS := -Werror -Wall
+
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
@@ -127,6 +134,11 @@
 
 LOCAL_MODULE := libaudioresampler
 
+LOCAL_CFLAGS := -Werror -Wall
+
+# uncomment to disable NEON on architectures that actually do support NEON, for benchmarking
+#LOCAL_CFLAGS += -DUSE_NEON=false
+
 include $(BUILD_SHARED_LIBRARY)
 
 include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index fab1ef5..d2fee81 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -31,6 +31,7 @@
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <binder/Parcel.h>
+#include <memunreachable/memunreachable.h>
 #include <utils/String16.h>
 #include <utils/threads.h>
 #include <utils/Atomic.h>
@@ -56,13 +57,12 @@
 
 #include <powermanager/PowerManager.h>
 
-#include <common_time/cc_helper.h>
-
 #include <media/IMediaLogService.h>
-
+#include <media/MemoryLeakTrackUtil.h>
 #include <media/nbaio/Pipe.h>
 #include <media/nbaio/PipeReader.h>
 #include <media/AudioParameter.h>
+#include <mediautils/BatteryNotifier.h>
 #include <private/android_filesystem_config.h>
 
 // ----------------------------------------------------------------------------
@@ -108,7 +108,7 @@
 // ----------------------------------------------------------------------------
 
 const char *formatToString(audio_format_t format) {
-    switch (format & AUDIO_FORMAT_MAIN_MASK) {
+    switch (audio_get_main_format(format)) {
     case AUDIO_FORMAT_PCM:
         switch (format) {
         case AUDIO_FORMAT_PCM_16_BIT: return "pcm16";
@@ -131,6 +131,7 @@
     case AUDIO_FORMAT_OPUS: return "opus";
     case AUDIO_FORMAT_AC3: return "ac-3";
     case AUDIO_FORMAT_E_AC3: return "e-ac-3";
+    case AUDIO_FORMAT_IEC61937: return "iec61937";
     default:
         break;
     }
@@ -175,7 +176,7 @@
       mHardwareStatus(AUDIO_HW_IDLE),
       mMasterVolume(1.0f),
       mMasterMute(false),
-      mNextUniqueId(1),
+      // mNextUniqueId(AUDIO_UNIQUE_ID_USE_MAX),
       mMode(AUDIO_MODE_INVALID),
       mBtNrecIsOff(false),
       mIsLowRamDevice(true),
@@ -183,15 +184,26 @@
       mGlobalEffectEnableTime(0),
       mSystemReady(false)
 {
+    // unsigned instead of audio_unique_id_use_t, because ++ operator is unavailable for enum
+    for (unsigned use = AUDIO_UNIQUE_ID_USE_UNSPECIFIED; use < AUDIO_UNIQUE_ID_USE_MAX; use++) {
+        // zero ID has a special meaning, so unavailable
+        mNextUniqueIds[use] = AUDIO_UNIQUE_ID_USE_MAX;
+    }
+
     getpid_cached = getpid();
-    char value[PROPERTY_VALUE_MAX];
-    bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
+    const bool doLog = property_get_bool("ro.test_harness", false);
     if (doLog) {
         mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
                 MemoryHeapBase::READ_ONLY);
     }
 
+    // Reset battery stats.
+    // If the audio service has crashed, battery stats could be left
+    // in a bad state; reset the state upon service start.
+    BatteryNotifier::getInstance().noteResetAudio();
+
 #ifdef TEE_SINK
+    char value[PROPERTY_VALUE_MAX];
     (void) property_get("ro.debuggable", value, "0");
     int debuggable = atoi(value);
     int teeEnabled = 0;
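
property_get_bool folds the fetch, parse, and default into one call, replacing the property_get/atoi pair. Side by side, as a sketch:

    #include <cutils/properties.h>
    #include <stdlib.h>

    // Before: fetch the raw string, then parse it by hand.
    static bool testHarnessOld() {
        char value[PROPERTY_VALUE_MAX];
        return property_get("ro.test_harness", value, "0") > 0 && atoi(value) == 1;
    }

    // After: one call, default folded in.
    static bool testHarnessNew() {
        return property_get_bool("ro.test_harness", false);
    }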
@@ -214,8 +226,6 @@
 
 void AudioFlinger::onFirstRef()
 {
-    int rc = 0;
-
     Mutex::Autolock _l(mLock);
 
     /* TODO: move all this work into an Init() function */
@@ -255,16 +265,17 @@
     }
 
     // Tell media.log service about any old writers that still need to be unregistered
-    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
-    if (binder != 0) {
-        sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
-        for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
-            sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
-            mUnregisteredWriters.pop();
-            mediaLogService->unregisterWriter(iMemory);
+    if (mLogMemoryDealer != 0) {
+        sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
+        if (binder != 0) {
+            sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
+            for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
+                sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
+                mUnregisteredWriters.pop();
+                mediaLogService->unregisterWriter(iMemory);
+            }
         }
     }
-
 }
 
 static const char * const audio_interfaces[] = {
@@ -458,6 +469,29 @@
                 binder->dump(fd, args);
             }
         }
+
+        // check for optional arguments
+        bool dumpMem = false;
+        bool unreachableMemory = false;
+        for (const auto &arg : args) {
+            if (arg == String16("-m")) {
+                dumpMem = true;
+            } else if (arg == String16("--unreachable")) {
+                unreachableMemory = true;
+            }
+        }
+
+        if (dumpMem) {
+            dprintf(fd, "\nDumping memory:\n");
+            std::string s = dumpMemoryAddresses(100 /* limit */);
+            write(fd, s.c_str(), s.size());
+        }
+        if (unreachableMemory) {
+            dprintf(fd, "\nDumping unreachable memory:\n");
+            // TODO - should limit be an argument parameter?
+            std::string s = GetUnreachableMemoryString(true /* contents */, 100 /* limit */);
+            write(fd, s.c_str(), s.size());
+        }
     }
     return NO_ERROR;
 }
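
Both new dumps are gated on explicit flags so a plain dumpsys stays cheap. A hedged sketch of the flag scan feeding libmemunreachable (assuming GetUnreachableMemoryString() as declared in the header included above):

    #include <stdio.h>
    #include <string>
    #include <unistd.h>
    #include <memunreachable/memunreachable.h>
    #include <utils/String16.h>
    #include <utils/Vector.h>

    // Scan dump arguments for an optional flag, then write the report
    // directly onto the dump fd.
    static void dumpUnreachableIfRequested(int fd,
            const android::Vector<android::String16>& args) {
        bool unreachable = false;
        for (const auto& arg : args) {
            if (arg == android::String16("--unreachable")) {
                unreachable = true;
            }
        }
        if (unreachable) {
            dprintf(fd, "\nDumping unreachable memory:\n");
            std::string s = GetUnreachableMemoryString(true /* contents */, 100 /* limit */);
            write(fd, s.c_str(), s.size());
        }
    }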
@@ -545,8 +579,9 @@
         IAudioFlinger::track_flags_t *flags,
         const sp<IMemory>& sharedBuffer,
         audio_io_handle_t output,
+        pid_t pid,
         pid_t tid,
-        int *sessionId,
+        audio_session_t *sessionId,
         int clientUid,
         status_t *status)
 {
@@ -554,7 +589,16 @@
     sp<TrackHandle> trackHandle;
     sp<Client> client;
     status_t lStatus;
-    int lSessionId;
+    audio_session_t lSessionId;
+
+    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+    if (pid == -1 || !isTrustedCallingUid(callingUid)) {
+        const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+        ALOGW_IF(pid != -1 && pid != callingPid,
+                 "%s uid %d pid %d tried to pass itself off as pid %d",
+                 __func__, callingUid, callingPid, pid);
+        pid = callingPid;
+    }
 
     // client AudioTrack::set already implements AUDIO_STREAM_DEFAULT => AUDIO_STREAM_MUSIC,
     // but if someone uses binder directly they could bypass that and cause us to crash
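
Client-supplied identity over binder is only honored for privileged callers; otherwise the transport-verified pid from IPCThreadState replaces it, as in the check above (openRecord() below applies the same treatment to the uid). The pattern reduced to a sketch, with a trustedCaller flag standing in for AudioFlinger's internal isTrustedCallingUid() helper:

    #include <binder/IPCThreadState.h>

    // Sanitize a client-supplied pid: -1 means "fill in for me", and any
    // other value is kept only when the caller is trusted (e.g. mediaserver
    // acting on behalf of a client).
    static pid_t sanitizePid(pid_t claimedPid, bool trustedCaller) {
        const pid_t callingPid = android::IPCThreadState::self()->getCallingPid();
        if (claimedPid == -1 || !trustedCaller) {
            return callingPid;  // binder-verified identity wins
        }
        return claimedPid;
    }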
@@ -600,11 +644,15 @@
             goto Exit;
         }
 
-        pid_t pid = IPCThreadState::self()->getCallingPid();
         client = registerPid(pid);
 
         PlaybackThread *effectThread = NULL;
         if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
+            if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+                ALOGE("createTrack() invalid session ID %d", *sessionId);
+                lStatus = BAD_VALUE;
+                goto Exit;
+            }
             lSessionId = *sessionId;
             // check if an effect chain with the same session ID is present on another
             // output thread and move it here.
@@ -620,7 +668,7 @@
             }
         } else {
             // if no audio session id is provided, create one here
-            lSessionId = nextUniqueId();
+            lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
             if (sessionId != NULL) {
                 *sessionId = lSessionId;
             }
@@ -656,7 +704,7 @@
             }
         }
 
-        setAudioHwSyncForSession_l(thread, (audio_session_t)lSessionId);
+        setAudioHwSyncForSession_l(thread, lSessionId);
     }
 
     if (lStatus != NO_ERROR) {
@@ -680,12 +728,12 @@
     return trackHandle;
 }
 
-uint32_t AudioFlinger::sampleRate(audio_io_handle_t output) const
+uint32_t AudioFlinger::sampleRate(audio_io_handle_t ioHandle) const
 {
     Mutex::Autolock _l(mLock);
-    PlaybackThread *thread = checkPlaybackThread_l(output);
+    ThreadBase *thread = checkThread_l(ioHandle);
     if (thread == NULL) {
-        ALOGW("sampleRate() unknown thread %d", output);
+        ALOGW("sampleRate() unknown thread %d", ioHandle);
         return 0;
     }
     return thread->sampleRate();
@@ -702,12 +750,12 @@
     return thread->format();
 }
 
-size_t AudioFlinger::frameCount(audio_io_handle_t output) const
+size_t AudioFlinger::frameCount(audio_io_handle_t ioHandle) const
 {
     Mutex::Autolock _l(mLock);
-    PlaybackThread *thread = checkPlaybackThread_l(output);
+    ThreadBase *thread = checkThread_l(ioHandle);
     if (thread == NULL) {
-        ALOGW("frameCount() unknown thread %d", output);
+        ALOGW("frameCount() unknown thread %d", ioHandle);
         return 0;
     }
     // FIXME currently returns the normal mixer's frame count to avoid confusing legacy callers;
@@ -715,6 +763,17 @@
     return thread->frameCount();
 }
 
+size_t AudioFlinger::frameCountHAL(audio_io_handle_t ioHandle) const
+{
+    Mutex::Autolock _l(mLock);
+    ThreadBase *thread = checkThread_l(ioHandle);
+    if (thread == NULL) {
+        ALOGW("frameCountHAL() unknown thread %d", ioHandle);
+        return 0;
+    }
+    return thread->frameCountHAL();
+}
+
 uint32_t AudioFlinger::latency(audio_io_handle_t output) const
 {
     Mutex::Autolock _l(mLock);
@@ -1064,10 +1123,10 @@
                     audio_devices_t device = thread->inDevice();
                     bool suspend = audio_is_bluetooth_sco_device(device) && btNrecIsOff;
                     // collect all of the thread's session IDs
-                    KeyedVector<int, bool> ids = thread->sessionIds();
+                    KeyedVector<audio_session_t, bool> ids = thread->sessionIds();
                     // suspend effects associated with those session IDs
                     for (size_t j = 0; j < ids.size(); ++j) {
-                        int sessionId = ids.keyAt(j);
+                        audio_session_t sessionId = ids.keyAt(j);
                         thread->setEffectSuspended(FX_IID_AEC,
                                                    suspend,
                                                    sessionId);
@@ -1156,7 +1215,9 @@
     if (ret != NO_ERROR) {
         return 0;
     }
-    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
+    if ((sampleRate == 0) ||
+            !audio_is_valid_format(format) || !audio_has_proportional_frames(format) ||
+            !audio_is_input_channel(channelMask)) {
         return 0;
     }
 
@@ -1233,8 +1294,6 @@
 status_t AudioFlinger::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
         audio_io_handle_t output) const
 {
-    status_t status;
-
     Mutex::Autolock _l(mLock);
 
     PlaybackThread *playbackThread = checkPlaybackThread_l(output);
@@ -1293,7 +1352,7 @@
     bool removed = false;
     for (size_t i = 0; i< num; ) {
         AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
-        ALOGV(" pid %d @ %d", ref->mPid, i);
+        ALOGV(" pid %d @ %zu", ref->mPid, i);
         if (ref->mPid == pid) {
             ALOGV(" removing entry for pid %d session %d", pid, ref->mSessionid);
             mAudioSessionRefs.removeAt(i);
@@ -1331,7 +1390,8 @@
 }
 
 // getEffectThread_l() must be called with AudioFlinger::mLock held
-sp<AudioFlinger::PlaybackThread> AudioFlinger::getEffectThread_l(int sessionId, int EffectId)
+sp<AudioFlinger::PlaybackThread> AudioFlinger::getEffectThread_l(audio_session_t sessionId,
+        int EffectId)
 {
     sp<PlaybackThread> thread;
 
@@ -1352,8 +1412,7 @@
 AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid)
     :   RefBase(),
         mAudioFlinger(audioFlinger),
-        mPid(pid),
-        mTimedTrackCount(0)
+        mPid(pid)
 {
     size_t heapSize = kClientSharedHeapSizeBytes;
     // Increase heap size on non low ram devices to limit risk of reconnection failure for
@@ -1375,31 +1434,6 @@
     return mMemoryDealer;
 }
 
-// Reserve one of the limited slots for a timed audio track associated
-// with this client
-bool AudioFlinger::Client::reserveTimedTrack()
-{
-    const int kMaxTimedTracksPerClient = 4;
-
-    Mutex::Autolock _l(mTimedTrackLock);
-
-    if (mTimedTrackCount >= kMaxTimedTracksPerClient) {
-        ALOGW("can not create timed track - pid %d has exceeded the limit",
-             mPid);
-        return false;
-    }
-
-    mTimedTrackCount++;
-    return true;
-}
-
-// Release a slot for a timed audio track
-void AudioFlinger::Client::releaseTimedTrack()
-{
-    Mutex::Autolock _l(mTimedTrackLock);
-    mTimedTrackCount--;
-}
-
 // ----------------------------------------------------------------------------
 
 AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
@@ -1422,10 +1456,6 @@
 
 // ----------------------------------------------------------------------------
 
-static bool deviceRequiresCaptureAudioOutputPermission(audio_devices_t inDevice) {
-    return audio_is_remote_submix_device(inDevice);
-}
-
 sp<IAudioRecord> AudioFlinger::openRecord(
         audio_io_handle_t input,
         uint32_t sampleRate,
@@ -1434,9 +1464,10 @@
         const String16& opPackageName,
         size_t *frameCount,
         IAudioFlinger::track_flags_t *flags,
+        pid_t pid,
         pid_t tid,
         int clientUid,
-        int *sessionId,
+        audio_session_t *sessionId,
         size_t *notificationFrames,
         sp<IMemory>& cblk,
         sp<IMemory>& buffers,
@@ -1446,13 +1477,30 @@
     sp<RecordHandle> recordHandle;
     sp<Client> client;
     status_t lStatus;
-    int lSessionId;
+    audio_session_t lSessionId;
 
     cblk.clear();
     buffers.clear();
 
+    bool updatePid = (pid == -1);
+    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+    if (!isTrustedCallingUid(callingUid)) {
+        ALOGW_IF((uid_t)clientUid != callingUid,
+                "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
+        clientUid = callingUid;
+        updatePid = true;
+    }
+
+    if (updatePid) {
+        const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+        ALOGW_IF(pid != -1 && pid != callingPid,
+                 "%s uid %d pid %d tried to pass itself off as pid %d",
+                 __func__, callingUid, callingPid, pid);
+        pid = callingPid;
+    }
+
     // check calling permissions
-    if (!recordingAllowed(opPackageName)) {
+    if (!recordingAllowed(opPackageName, tid, clientUid)) {
         ALOGE("openRecord() permission denied: recording not allowed");
         lStatus = PERMISSION_DENIED;
         goto Exit;
@@ -1488,21 +1536,23 @@
             goto Exit;
         }
 
-        pid_t pid = IPCThreadState::self()->getCallingPid();
         client = registerPid(pid);
 
         if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
+            if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+                lStatus = BAD_VALUE;
+                goto Exit;
+            }
             lSessionId = *sessionId;
         } else {
             // if no audio session id is provided, create one here
-            lSessionId = nextUniqueId();
+            lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
             if (sessionId != NULL) {
                 *sessionId = lSessionId;
             }
         }
         ALOGV("openRecord() lSessionId: %d input %d", lSessionId, input);
 
-        // TODO: the uid should be passed in as a parameter to openRecord
         recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
                                                   frameCount, lSessionId, notificationFrames,
                                                   clientUid, flags, tid, &lStatus);
@@ -1511,7 +1561,7 @@
         if (lStatus == NO_ERROR) {
             // Check if one effect chain was awaiting for an AudioRecord to be created on this
             // session and move it to this thread.
-            sp<EffectChain> chain = getOrphanEffectChain_l((audio_session_t)lSessionId);
+            sp<EffectChain> chain = getOrphanEffectChain_l(lSessionId);
             if (chain != 0) {
                 Mutex::Autolock _l(thread->mLock);
                 thread->addEffectChain_l(chain);
@@ -1550,10 +1600,10 @@
 audio_module_handle_t AudioFlinger::loadHwModule(const char *name)
 {
     if (name == NULL) {
-        return 0;
+        return AUDIO_MODULE_HANDLE_NONE;
     }
     if (!settingsAllowed()) {
-        return 0;
+        return AUDIO_MODULE_HANDLE_NONE;
     }
     Mutex::Autolock _l(mLock);
     return loadHwModule_l(name);
@@ -1573,16 +1623,16 @@
 
     int rc = load_audio_interface(name, &dev);
     if (rc) {
-        ALOGI("loadHwModule() error %d loading module %s ", rc, name);
-        return 0;
+        ALOGE("loadHwModule() error %d loading module %s", rc, name);
+        return AUDIO_MODULE_HANDLE_NONE;
     }
 
     mHardwareStatus = AUDIO_HW_INIT;
     rc = dev->init_check(dev);
     mHardwareStatus = AUDIO_HW_IDLE;
     if (rc) {
-        ALOGI("loadHwModule() init check error %d for module %s ", rc, name);
-        return 0;
+        ALOGE("loadHwModule() init check error %d for module %s", rc, name);
+        return AUDIO_MODULE_HANDLE_NONE;
     }
 
     // Check and cache this HAL's level of support for master mute and master
@@ -1629,7 +1679,7 @@
         mHardwareStatus = AUDIO_HW_IDLE;
     }
 
-    audio_module_handle_t handle = nextUniqueId();
+    audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
     mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
 
     ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
@@ -1773,9 +1823,13 @@
         return 0;
     }
 
-    audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
     if (*output == AUDIO_IO_HANDLE_NONE) {
-        *output = nextUniqueId();
+        *output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
+    } else {
+        // Audio Policy does not currently request a specific output handle.
+        // If this is ever needed, see openInput_l() for example code.
+        ALOGE("openOutput_l requested output handle %d is not AUDIO_IO_HANDLE_NONE", *output);
+        return 0;
     }
 
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
@@ -1892,7 +1946,7 @@
         return AUDIO_IO_HANDLE_NONE;
     }
 
-    audio_io_handle_t id = nextUniqueId();
+    audio_io_handle_t id = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
     DuplicatingThread *thread = new DuplicatingThread(this, thread1, id, mSystemReady);
     thread->addOutputTrack(thread2);
     mPlaybackThreads.add(id, thread);
@@ -2046,8 +2100,18 @@
         return 0;
     }
 
+    // Audio Policy can request a specific handle for hardware hotword.
+    // The goal here is not to re-open an already opened input.
+    // It is to use a pre-assigned I/O handle.
     if (*input == AUDIO_IO_HANDLE_NONE) {
-        *input = nextUniqueId();
+        *input = nextUniqueId(AUDIO_UNIQUE_ID_USE_INPUT);
+    } else if (audio_unique_id_get_use(*input) != AUDIO_UNIQUE_ID_USE_INPUT) {
+        ALOGE("openInput_l() requested input handle %d is invalid", *input);
+        return 0;
+    } else if (mRecordThreads.indexOfKey(*input) >= 0) {
+        // This should not happen in a transient state with current design.
+        ALOGE("openInput_l() requested input handle %d is already assigned", *input);
+        return 0;
     }
 
     audio_config_t halconfig = *config;
@@ -2070,8 +2134,8 @@
         audio_is_linear_pcm(config->format) &&
         audio_is_linear_pcm(halconfig.format) &&
         (halconfig.sample_rate <= AUDIO_RESAMPLER_DOWN_RATIO_MAX * config->sample_rate) &&
-        (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_2) &&
-        (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_2)) {
+        (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_8) &&
+        (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_8)) {
         // FIXME describe the change proposed by HAL (save old values so we can log them here)
         ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
         inStream = NULL;
@@ -2251,12 +2315,18 @@
 }
 
 
-audio_unique_id_t AudioFlinger::newAudioUniqueId()
+audio_unique_id_t AudioFlinger::newAudioUniqueId(audio_unique_id_use_t use)
 {
-    return nextUniqueId();
+    // This is a binder API, so a malicious client could pass in a bad parameter.
+    // Check for that before calling the internal API nextUniqueId().
+    if ((unsigned) use >= (unsigned) AUDIO_UNIQUE_ID_USE_MAX) {
+        ALOGE("newAudioUniqueId invalid use %d", use);
+        return AUDIO_UNIQUE_ID_ALLOCATE;
+    }
+    return nextUniqueId(use);
 }
 
-void AudioFlinger::acquireAudioSessionId(int audioSession, pid_t pid)
+void AudioFlinger::acquireAudioSessionId(audio_session_t audioSession, pid_t pid)
 {
     Mutex::Autolock _l(mLock);
     pid_t caller = IPCThreadState::self()->getCallingPid();
@@ -2290,7 +2360,7 @@
     ALOGV(" added new entry for %d", audioSession);
 }
 
-void AudioFlinger::releaseAudioSessionId(int audioSession, pid_t pid)
+void AudioFlinger::releaseAudioSessionId(audio_session_t audioSession, pid_t pid)
 {
     Mutex::Autolock _l(mLock);
     pid_t caller = IPCThreadState::self()->getCallingPid();
@@ -2375,6 +2445,23 @@
     return;
 }
 
+// checkThread_l() must be called with AudioFlinger::mLock held
+AudioFlinger::ThreadBase *AudioFlinger::checkThread_l(audio_io_handle_t ioHandle) const
+{
+    ThreadBase *thread = NULL;
+    switch (audio_unique_id_get_use(ioHandle)) {
+    case AUDIO_UNIQUE_ID_USE_OUTPUT:
+        thread = checkPlaybackThread_l(ioHandle);
+        break;
+    case AUDIO_UNIQUE_ID_USE_INPUT:
+        thread = checkRecordThread_l(ioHandle);
+        break;
+    default:
+        break;
+    }
+    return thread;
+}
+
 // checkPlaybackThread_l() must be called with AudioFlinger::mLock held
 AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
 {
@@ -2394,9 +2481,25 @@
     return mRecordThreads.valueFor(input).get();
 }
 
-uint32_t AudioFlinger::nextUniqueId()
+audio_unique_id_t AudioFlinger::nextUniqueId(audio_unique_id_use_t use)
 {
-    return (uint32_t) android_atomic_inc(&mNextUniqueId);
+    // This is the internal API, so it is OK to assert on bad parameter.
+    LOG_ALWAYS_FATAL_IF((unsigned) use >= (unsigned) AUDIO_UNIQUE_ID_USE_MAX);
+    const int maxRetries = use == AUDIO_UNIQUE_ID_USE_SESSION ? 3 : 1;
+    for (int retry = 0; retry < maxRetries; retry++) {
+        // The cast allows wraparound from max positive to min negative instead of abort
+        uint32_t base = (uint32_t) atomic_fetch_add_explicit(&mNextUniqueIds[use],
+                (uint_fast32_t) AUDIO_UNIQUE_ID_USE_MAX, memory_order_acq_rel);
+        ALOG_ASSERT(audio_unique_id_get_use(base) == AUDIO_UNIQUE_ID_USE_UNSPECIFIED);
+        // allow wrap by skipping 0 and -1 for session ids
+        if (!(base == 0 || base == (~0u & ~AUDIO_UNIQUE_ID_USE_MASK))) {
+            ALOGW_IF(retry != 0, "unique ID overflow for use %d", use);
+            return (audio_unique_id_t) (base | use);
+        }
+    }
+    // We have no way of recovering from wraparound
+    LOG_ALWAYS_FATAL("unique ID overflow for use %d", use);
+    // TODO Use a floor after wraparound.  This may need a mutex.
 }
 
 AudioFlinger::PlaybackThread *AudioFlinger::primaryPlaybackThread_l() const
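
Unique IDs now multiplex a use tag into their low-order bits, which is what lets checkThread_l() and the session-ID validation in createTrack()/openRecord() classify a handle with audio_unique_id_get_use() alone. A hedged sketch of the scheme (enum values mirror audio_unique_id_use_t but are assumptions here; the real allocator also seeds counters past zero and skips 0 and -1 for sessions):

    #include <atomic>
    #include <cstdint>

    enum Use : uint32_t { UNSPECIFIED = 0, SESSION = 1, OUTPUT = 5, INPUT = 6, USE_MAX = 8 };
    constexpr uint32_t kUseMask = USE_MAX - 1;  // USE_MAX is a power of two

    static std::atomic<uint32_t> gNext[USE_MAX];  // one counter per use

    // Advance by USE_MAX so the low bits stay clear, then OR in the tag.
    static uint32_t nextId(Use use) {
        uint32_t base = gNext[use].fetch_add(USE_MAX, std::memory_order_acq_rel);
        return base | use;
    }

    // Recover the use from any allocated ID.
    static Use useOf(uint32_t id) { return static_cast<Use>(id & kUseMask); }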
@@ -2426,8 +2529,8 @@
 }
 
 sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_event_t type,
-                                    int triggerSession,
-                                    int listenerSession,
+                                    audio_session_t triggerSession,
+                                    audio_session_t listenerSession,
                                     sync_event_callback_t callBack,
                                     wp<RefBase> cookie)
 {
@@ -2487,7 +2590,7 @@
         const sp<IEffectClient>& effectClient,
         int32_t priority,
         audio_io_handle_t io,
-        int sessionId,
+        audio_session_t sessionId,
         const String16& opPackageName,
         status_t *status,
         int *id,
@@ -2585,7 +2688,7 @@
 
         // check recording permission for visualizer
         if ((memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) &&
-            !recordingAllowed(opPackageName)) {
+            !recordingAllowed(opPackageName, pid, IPCThreadState::self()->getCallingUid())) {
             lStatus = PERMISSION_DENIED;
             goto Exit;
         }
@@ -2648,7 +2751,7 @@
         } else {
             // Check if one effect chain was awaiting for an effect to be created on this
             // session and used it instead of creating a new one.
-            sp<EffectChain> chain = getOrphanEffectChain_l((audio_session_t)sessionId);
+            sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
             if (chain != 0) {
                 Mutex::Autolock _l(thread->mLock);
                 thread->addEffectChain_l(chain);
@@ -2675,7 +2778,7 @@
     return handle;
 }
 
-status_t AudioFlinger::moveEffects(int sessionId, audio_io_handle_t srcOutput,
+status_t AudioFlinger::moveEffects(audio_session_t sessionId, audio_io_handle_t srcOutput,
         audio_io_handle_t dstOutput)
 {
     ALOGV("moveEffects() session %d, srcOutput %d, dstOutput %d",
@@ -2702,7 +2805,7 @@
 }
 
 // moveEffectChain_l must be called with both srcThread and dstThread mLocks held
-status_t AudioFlinger::moveEffectChain_l(int sessionId,
+status_t AudioFlinger::moveEffectChain_l(audio_session_t sessionId,
                                    AudioFlinger::PlaybackThread *srcThread,
                                    AudioFlinger::PlaybackThread *dstThread,
                                    bool reRegister)
@@ -2828,9 +2931,9 @@
 
 status_t AudioFlinger::putOrphanEffectChain_l(const sp<AudioFlinger::EffectChain>& chain)
 {
-    audio_session_t session = (audio_session_t)chain->sessionId();
+    audio_session_t session = chain->sessionId();
     ssize_t index = mOrphanEffectChains.indexOfKey(session);
-    ALOGV("putOrphanEffectChain_l session %d index %d", session, index);
+    ALOGV("putOrphanEffectChain_l session %d index %zd", session, index);
     if (index >= 0) {
         ALOGW("putOrphanEffectChain_l chain for session %d already present", session);
         return ALREADY_EXISTS;
@@ -2843,7 +2946,7 @@
 {
     sp<EffectChain> chain;
     ssize_t index = mOrphanEffectChains.indexOfKey(session);
-    ALOGV("getOrphanEffectChain_l session %d index %d", session, index);
+    ALOGV("getOrphanEffectChain_l session %d index %zd", session, index);
     if (index >= 0) {
         chain = mOrphanEffectChains.valueAt(index);
         mOrphanEffectChains.removeItemsAt(index);
@@ -2854,13 +2957,13 @@
 bool AudioFlinger::updateOrphanEffectChains(const sp<AudioFlinger::EffectModule>& effect)
 {
     Mutex::Autolock _l(mLock);
-    audio_session_t session = (audio_session_t)effect->sessionId();
+    audio_session_t session = effect->sessionId();
     ssize_t index = mOrphanEffectChains.indexOfKey(session);
-    ALOGV("updateOrphanEffectChains session %d index %d", session, index);
+    ALOGV("updateOrphanEffectChains session %d index %zd", session, index);
     if (index >= 0) {
         sp<EffectChain> chain = mOrphanEffectChains.valueAt(index);
         if (chain->removeEffect_l(effect) == 0) {
-            ALOGV("updateOrphanEffectChains removing effect chain at index %d", index);
+            ALOGV("updateOrphanEffectChains removing effect chain at index %zd", index);
             mOrphanEffectChains.removeItemsAt(index);
         }
         return true;
@@ -2890,7 +2993,7 @@
         // failures at unlink() which are ignored.  It's also unlikely since
         // normally dumpsys is only done by bugreport or from the command line.
         char teePath[32+256];
-        strcpy(teePath, "/data/misc/media");
+        strcpy(teePath, "/data/misc/audioserver");
         size_t teePathLen = strlen(teePath);
         DIR *dir = opendir(teePath);
         teePath[teePathLen++] = '/';
@@ -2932,7 +3035,8 @@
             }
         } else {
             if (fd >= 0) {
-                dprintf(fd, "unable to rotate tees in %s: %s\n", teePath, strerror(errno));
+                dprintf(fd, "unable to rotate tees in %.*s: %s\n", teePathLen, teePath,
+                        strerror(errno));
             }
         }
         char teeTime[16];
@@ -2966,8 +3070,7 @@
             void *buffer = malloc(TEE_SINK_READ * frameSize);
             for (;;) {
                 size_t count = TEE_SINK_READ;
-                ssize_t actual = teeSource->read(buffer, count,
-                        AudioBufferProvider::kInvalidPTS);
+                ssize_t actual = teeSource->read(buffer, count);
                 bool wasFirstRead = firstRead;
                 firstRead = false;
                 if (actual <= 0) {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 08fa70d..59ad688 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -23,8 +23,6 @@
 #include <sys/types.h>
 #include <limits.h>
 
-#include <common_time/cc_helper.h>
-
 #include <cutils/compiler.h>
 
 #include <media/IAudioFlinger.h>
@@ -59,6 +57,7 @@
 #include "AudioStreamOut.h"
 #include "SpdifStreamOut.h"
 #include "AudioHwDevice.h"
+#include "LinearMap.h"
 
 #include <powermanager/IPowerManager.h>
 
@@ -78,14 +77,6 @@
 
 // ----------------------------------------------------------------------------
 
-// The macro FCC_2 highlights some (but not all) places where there are are 2-channel assumptions.
-// This is typically due to legacy implementation of stereo input or output.
-// Search also for "2", "left", "right", "[0]", "[1]", ">> 16", "<< 16", etc.
-#define FCC_2 2     // FCC_2 = Fixed Channel Count 2
-// The macro FCC_8 highlights places where there are 8-channel assumptions.
-// This is typically due to audio mixer and resampler limitations.
-#define FCC_8 8     // FCC_8 = Fixed Channel Count 8
-
 static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);
 
 
@@ -116,8 +107,9 @@
                                 IAudioFlinger::track_flags_t *flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
+                                pid_t pid,
                                 pid_t tid,
-                                int *sessionId,
+                                audio_session_t *sessionId,
                                 int clientUid,
                                 status_t *status /*non-NULL*/);
 
@@ -129,17 +121,19 @@
                                 const String16& opPackageName,
                                 size_t *pFrameCount,
                                 IAudioFlinger::track_flags_t *flags,
+                                pid_t pid,
                                 pid_t tid,
                                 int clientUid,
-                                int *sessionId,
+                                audio_session_t *sessionId,
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
                                 sp<IMemory>& buffers,
                                 status_t *status /*non-NULL*/);
 
-    virtual     uint32_t    sampleRate(audio_io_handle_t output) const;
+    virtual     uint32_t    sampleRate(audio_io_handle_t ioHandle) const;
     virtual     audio_format_t format(audio_io_handle_t output) const;
-    virtual     size_t      frameCount(audio_io_handle_t output) const;
+    virtual     size_t      frameCount(audio_io_handle_t ioHandle) const;
+    virtual     size_t      frameCountHAL(audio_io_handle_t ioHandle) const;
     virtual     uint32_t    latency(audio_io_handle_t output) const;
 
     virtual     status_t    setMasterVolume(float value);
@@ -205,11 +199,12 @@
 
     virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const;
 
-    virtual audio_unique_id_t newAudioUniqueId();
+    // This is the binder API.  For the internal API see nextUniqueId().
+    virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
 
-    virtual void acquireAudioSessionId(int audioSession, pid_t pid);
+    virtual void acquireAudioSessionId(audio_session_t audioSession, pid_t pid);
 
-    virtual void releaseAudioSessionId(int audioSession, pid_t pid);
+    virtual void releaseAudioSessionId(audio_session_t audioSession, pid_t pid);
 
     virtual status_t queryNumberEffects(uint32_t *numEffects) const;
 
@@ -223,13 +218,13 @@
                         const sp<IEffectClient>& effectClient,
                         int32_t priority,
                         audio_io_handle_t io,
-                        int sessionId,
+                        audio_session_t sessionId,
                         const String16& opPackageName,
                         status_t *status /*non-NULL*/,
                         int *id,
                         int *enabled);
 
-    virtual status_t moveEffects(int sessionId, audio_io_handle_t srcOutput,
+    virtual status_t moveEffects(audio_session_t sessionId, audio_io_handle_t srcOutput,
                         audio_io_handle_t dstOutput);
 
     virtual audio_module_handle_t loadHwModule(const char *name);
@@ -292,8 +287,8 @@
     class SyncEvent : public RefBase {
     public:
         SyncEvent(AudioSystem::sync_event_t type,
-                  int triggerSession,
-                  int listenerSession,
+                  audio_session_t triggerSession,
+                  audio_session_t listenerSession,
                   sync_event_callback_t callBack,
                   wp<RefBase> cookie)
         : mType(type), mTriggerSession(triggerSession), mListenerSession(listenerSession),
@@ -306,22 +301,22 @@
         bool isCancelled() const { Mutex::Autolock _l(mLock); return (mCallback == NULL); }
         void cancel() { Mutex::Autolock _l(mLock); mCallback = NULL; }
         AudioSystem::sync_event_t type() const { return mType; }
-        int triggerSession() const { return mTriggerSession; }
-        int listenerSession() const { return mListenerSession; }
+        audio_session_t triggerSession() const { return mTriggerSession; }
+        audio_session_t listenerSession() const { return mListenerSession; }
         wp<RefBase> cookie() const { return mCookie; }
 
     private:
           const AudioSystem::sync_event_t mType;
-          const int mTriggerSession;
-          const int mListenerSession;
+          const audio_session_t mTriggerSession;
+          const audio_session_t mListenerSession;
           sync_event_callback_t mCallback;
           const wp<RefBase> mCookie;
           mutable Mutex mLock;
     };
 
     sp<SyncEvent> createSyncEvent(AudioSystem::sync_event_t type,
-                                        int triggerSession,
-                                        int listenerSession,
+                                        audio_session_t triggerSession,
+                                        audio_session_t listenerSession,
                                         sync_event_callback_t callBack,
                                         wp<RefBase> cookie);
 
@@ -422,18 +417,12 @@
         pid_t               pid() const { return mPid; }
         sp<AudioFlinger>    audioFlinger() const { return mAudioFlinger; }
 
-        bool reserveTimedTrack();
-        void releaseTimedTrack();
-
     private:
                             Client(const Client&);
                             Client& operator = (const Client&);
         const sp<AudioFlinger> mAudioFlinger;
               sp<MemoryDealer> mMemoryDealer;
         const pid_t         mPid;
-
-        Mutex               mTimedTrackLock;
-        int                 mTimedTrackCount;
     };
 
     // --- Notification Client ---
@@ -504,12 +493,6 @@
         virtual void        flush();
         virtual void        pause();
         virtual status_t    attachAuxEffect(int effectId);
-        virtual status_t    allocateTimedBuffer(size_t size,
-                                                sp<IMemory>* buffer);
-        virtual status_t    queueTimedBuffer(const sp<IMemory>& buffer,
-                                             int64_t pts);
-        virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
-                                                  int target);
         virtual status_t    setParameters(const String8& keyValuePairs);
         virtual status_t    getTimestamp(AudioTimestamp& timestamp);
         virtual void        signal(); // signal playback thread for a change in control block
@@ -526,7 +509,8 @@
     public:
         RecordHandle(const sp<RecordThread::RecordTrack>& recordTrack);
         virtual             ~RecordHandle();
-        virtual status_t    start(int /*AudioSystem::sync_event_t*/ event, int triggerSession);
+        virtual status_t    start(int /*AudioSystem::sync_event_t*/ event,
+                audio_session_t triggerSession);
         virtual void        stop();
         virtual status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
@@ -538,6 +522,7 @@
     };
 
 
+              ThreadBase *checkThread_l(audio_io_handle_t ioHandle) const;
               PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
               MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
               RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
@@ -568,25 +553,29 @@
                                    const sp<AudioIoDescriptor>& ioDesc,
                                    pid_t pid = 0);
 
-              // Allocate an audio_io_handle_t, session ID, effect ID, or audio_module_handle_t.
+              // Allocate an audio_unique_id_t.
+              // Specific types are audio_io_handle_t, audio_session_t, effect ID (int),
+              // audio_module_handle_t, and audio_patch_handle_t.
               // They all share the same ID space, but the namespaces are actually independent
               // because there are separate KeyedVectors for each kind of ID.
-              // The return value is uint32_t, but is cast to signed for some IDs.
+              // The return value is cast to the specific type depending on how the ID will be used.
               // FIXME This API does not handle rollover to zero (for unsigned IDs),
               //       or from positive to negative (for signed IDs).
               //       Thus it may fail by returning an ID of the wrong sign,
               //       or by returning a non-unique ID.
-              uint32_t nextUniqueId();
+              // This is the internal API.  For the binder API see newAudioUniqueId().
+              audio_unique_id_t nextUniqueId(audio_unique_id_use_t use);
 
-              status_t moveEffectChain_l(int sessionId,
+              status_t moveEffectChain_l(audio_session_t sessionId,
                                      PlaybackThread *srcThread,
                                      PlaybackThread *dstThread,
                                      bool reRegister);
+
               // return thread associated with primary hardware device, or NULL
               PlaybackThread *primaryPlaybackThread_l() const;
               audio_devices_t primaryOutputDevice_l() const;
 
-              sp<PlaybackThread> getEffectThread_l(int sessionId, int EffectId);
+              sp<PlaybackThread> getEffectThread_l(audio_session_t sessionId, int EffectId);
 
 
                 void        removeClient_l(pid_t pid);
@@ -629,9 +618,9 @@
 
     // for mAudioSessionRefs only
     struct AudioSessionRef {
-        AudioSessionRef(int sessionid, pid_t pid) :
+        AudioSessionRef(audio_session_t sessionid, pid_t pid) :
             mSessionid(sessionid), mPid(pid), mCnt(1) {}
-        const int   mSessionid;
+        const audio_session_t mSessionid;
         const pid_t mPid;
         int         mCnt;
     };
@@ -693,9 +682,8 @@
                 // protected by mClientLock
                 DefaultKeyedVector< pid_t, sp<NotificationClient> >    mNotificationClients;
 
-                volatile int32_t                    mNextUniqueId;  // updated by android_atomic_inc
-                // nextUniqueId() returns uint32_t, but this is declared int32_t
-                // because the atomic operations require an int32_t
+                // updated by atomic_fetch_add_explicit
+                volatile atomic_uint_fast32_t       mNextUniqueIds[AUDIO_UNIQUE_ID_USE_MAX];
 
                 audio_mode_t                        mMode;
                 bool                                mBtNrecIsOff;
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 3191598..7494930 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -68,7 +68,7 @@
             status);
 
         // If the data is encoded then try again using wrapped PCM.
-        bool wrapperNeeded = !audio_is_linear_pcm(originalConfig.format)
+        bool wrapperNeeded = !audio_has_proportional_frames(originalConfig.format)
                 && ((flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0)
                 && ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0);
 
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 8a9a837..aea6b67 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -36,8 +36,6 @@
 
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
-#include <common_time/local_clock.h>
-#include <common_time/cc_helper.h>
 
 #include "AudioMixerOps.h"
 #include "AudioMixer.h"
@@ -786,7 +784,6 @@
                         mMixerInFormat,
                         resamplerChannelCount,
                         devSampleRate, quality);
-                resampler->setLocalTimeFreq(sLocalTimeFreq);
             }
             return true;
         }
@@ -906,13 +903,13 @@
 }
 
 
-void AudioMixer::process(int64_t pts)
+void AudioMixer::process()
 {
-    mState.hook(&mState, pts);
+    mState.hook(&mState);
 }
 
 
-void AudioMixer::process__validate(state_t* state, int64_t pts)
+void AudioMixer::process__validate(state_t* state)
 {
     ALOGW_IF(!state->needsChanged,
         "in process__validate() but nothing's invalid");
@@ -1042,7 +1039,7 @@
         countActiveTracks, state->enabledTracks,
         all16BitsStereoNoResample, resampling, volumeRamp);
 
-   state->hook(state, pts);
+   state->hook(state);
 
     // Now that the volume ramp has been done, set optimal state and
     // track hooks for subsequent mixer process
@@ -1367,7 +1364,7 @@
 }
 
 // no-op case
-void AudioMixer::process__nop(state_t* state, int64_t pts)
+void AudioMixer::process__nop(state_t* state)
 {
     ALOGVV("process__nop\n");
     uint32_t e0 = state->enabledTracks;
@@ -1401,9 +1398,7 @@
                 size_t outFrames = state->frameCount;
                 while (outFrames) {
                     t3.buffer.frameCount = outFrames;
-                    int64_t outputPTS = calculateOutputPTS(
-                        t3, pts, state->frameCount - outFrames);
-                    t3.bufferProvider->getNextBuffer(&t3.buffer, outputPTS);
+                    t3.bufferProvider->getNextBuffer(&t3.buffer);
                     if (t3.buffer.raw == NULL) break;
                     outFrames -= t3.buffer.frameCount;
                     t3.bufferProvider->releaseBuffer(&t3.buffer);
@@ -1414,7 +1409,7 @@
 }
 
 // generic code without resampling
-void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts)
+void AudioMixer::process__genericNoResampling(state_t* state)
 {
     ALOGVV("process__genericNoResampling\n");
     int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
@@ -1427,7 +1422,7 @@
         e0 &= ~(1<<i);
         track_t& t = state->tracks[i];
         t.buffer.frameCount = state->frameCount;
-        t.bufferProvider->getNextBuffer(&t.buffer, pts);
+        t.bufferProvider->getNextBuffer(&t.buffer);
         t.frameCount = t.buffer.frameCount;
         t.in = t.buffer.raw;
     }
@@ -1486,9 +1481,7 @@
                         t.bufferProvider->releaseBuffer(&t.buffer);
                         t.buffer.frameCount = (state->frameCount - numFrames) -
                                 (BLOCKSIZE - outFrames);
-                        int64_t outputPTS = calculateOutputPTS(
-                            t, pts, numFrames + (BLOCKSIZE - outFrames));
-                        t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
+                        t.bufferProvider->getNextBuffer(&t.buffer);
                         t.in = t.buffer.raw;
                         if (t.in == NULL) {
                             enabledTracks &= ~(1<<i);
@@ -1522,7 +1515,7 @@
 
 
 // generic code with resampling
-void AudioMixer::process__genericResampling(state_t* state, int64_t pts)
+void AudioMixer::process__genericResampling(state_t* state)
 {
     ALOGVV("process__genericResampling\n");
     // this const just means that local variable outTemp doesn't change
@@ -1561,7 +1554,6 @@
             // acquire/release the buffers because it's done by
             // the resampler.
             if (t.needs & NEEDS_RESAMPLE) {
-                t.resampler->setPTS(pts);
                 t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
             } else {
 
@@ -1569,8 +1561,7 @@
 
                 while (outFrames < numFrames) {
                     t.buffer.frameCount = numFrames - outFrames;
-                    int64_t outputPTS = calculateOutputPTS(t, pts, outFrames);
-                    t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
+                    t.bufferProvider->getNextBuffer(&t.buffer);
                     t.in = t.buffer.raw;
                     // t.in == NULL can happen if the track was flushed just after having
                     // been enabled for mixing.
@@ -1592,8 +1583,7 @@
 }
 
 // one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state,
-                                                           int64_t pts)
+void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
 {
     ALOGVV("process__OneTrack16BitsStereoNoResampling\n");
     // This method is only called when state->enabledTracks has exactly
@@ -1615,8 +1605,7 @@
     const uint32_t vrl = t.volumeRL;
     while (numFrames) {
         b.frameCount = numFrames;
-        int64_t outputPTS = calculateOutputPTS(t, pts, out - t.mainBuffer);
-        t.bufferProvider->getNextBuffer(&b, outputPTS);
+        t.bufferProvider->getNextBuffer(&b);
         const int16_t *in = b.i16;
 
         // in == NULL can happen if the track was flushed just after having
@@ -1677,24 +1666,10 @@
     }
 }
 
-int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
-                                       int outputFrameIndex)
-{
-    if (AudioBufferProvider::kInvalidPTS == basePTS) {
-        return AudioBufferProvider::kInvalidPTS;
-    }
-
-    return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate);
-}
-
-/*static*/ uint64_t AudioMixer::sLocalTimeFreq;
 /*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT;
 
 /*static*/ void AudioMixer::sInitRoutine()
 {
-    LocalClock lc;
-    sLocalTimeFreq = lc.getLocalFreq(); // for the resampler
-
     DownmixerBufferProvider::init(); // for the downmixer
 }
 
@@ -1836,7 +1811,7 @@
  * TA: int32_t (Q4.27)
  */
 template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::process_NoResampleOneTrack(state_t* state, int64_t pts)
+void AudioMixer::process_NoResampleOneTrack(state_t* state)
 {
     ALOGVV("process_NoResampleOneTrack\n");
     // CLZ is faster than CTZ on ARM, though really not sure if true after 31 - clz.
@@ -1852,8 +1827,7 @@
         AudioBufferProvider::Buffer& b(t->buffer);
         // get input buffer
         b.frameCount = numFrames;
-        const int64_t outputPTS = calculateOutputPTS(*t, pts, state->frameCount - numFrames);
-        t->bufferProvider->getNextBuffer(&b, outputPTS);
+        t->bufferProvider->getNextBuffer(&b);
         const TI *in = reinterpret_cast<TI*>(b.raw);
 
         // in == NULL can happen if the track was flushed just after having
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 7165c6c..e788ac3 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -126,7 +126,7 @@
     void        setParameter(int name, int target, int param, void *value);
 
     void        setBufferProvider(int name, AudioBufferProvider* bufferProvider);
-    void        process(int64_t pts);
+    void        process();
 
     uint32_t    trackNames() const { return mTrackNames; }
 
@@ -278,7 +278,7 @@
         void        reconfigureBufferProviders();
     };
 
-    typedef void (*process_hook_t)(state_t* state, int64_t pts);
+    typedef void (*process_hook_t)(state_t* state);
 
     // pad to 32-bytes to fill cache line
     struct state_t {
@@ -328,17 +328,12 @@
     static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
             int32_t* aux);
 
-    static void process__validate(state_t* state, int64_t pts);
-    static void process__nop(state_t* state, int64_t pts);
-    static void process__genericNoResampling(state_t* state, int64_t pts);
-    static void process__genericResampling(state_t* state, int64_t pts);
-    static void process__OneTrack16BitsStereoNoResampling(state_t* state,
-                                                          int64_t pts);
+    static void process__validate(state_t* state);
+    static void process__nop(state_t* state);
+    static void process__genericNoResampling(state_t* state);
+    static void process__genericResampling(state_t* state);
+    static void process__OneTrack16BitsStereoNoResampling(state_t* state);
 
-    static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS,
-                                      int outputFrameIndex);
-
-    static uint64_t         sLocalTimeFreq;
     static pthread_once_t   sOnceControl;
     static void             sInitRoutine();
 
@@ -359,7 +354,7 @@
 
     // multi-format process hooks
     template <int MIXTYPE, typename TO, typename TI, typename TA>
-    static void process_NoResampleOneTrack(state_t* state, int64_t pts);
+    static void process_NoResampleOneTrack(state_t* state);
 
     // multi-format track hooks
     template <int MIXTYPE, typename TO, typename TI, typename TA>
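
Note: process_hook_t above is the dispatch point: AudioMixer::process() calls through a function pointer that the validate step reassigns as track formats change. A reduced illustration of the pattern with hypothetical State and hook names (not the AOSP types):

    struct State;                                  // forward declaration for the typedef
    typedef void (*process_hook_t)(State *state);  // note: no pts parameter anymore

    struct State {
        process_hook_t hook;    // currently selected specialized routine
        int enabledTracks;
    };

    static void process__nop(State *) { /* nothing audible to mix */ }
    static void process__generic(State *) { /* slow path, handles all cases */ }

    void process(State *state) {
        state->hook(state);     // a validate step would have picked the hook
    }

    int main() {
        State state{process__generic, 0};
        process(&state);
        state.hook = process__nop;   // revalidation swaps hooks like this
        process(&state);
        return 0;
    }
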
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index e49b7b1..4f8b413 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -29,7 +29,8 @@
 #include "AudioResamplerDyn.h"
 
 #ifdef __arm__
-    #define ASM_ARM_RESAMP1 // enable asm optimisation for ResamplerOrder1
+    // bug 13102576
+    //#define ASM_ARM_RESAMP1 // enable asm optimisation for ResamplerOrder1
 #endif
 
 namespace android {
@@ -261,8 +262,8 @@
         int32_t sampleRate, src_quality quality) :
         mChannelCount(inChannelCount),
         mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0),
-        mPhaseFraction(0), mLocalTimeFreq(0),
-        mPTS(AudioBufferProvider::kInvalidPTS), mQuality(quality) {
+        mPhaseFraction(0),
+        mQuality(quality) {
 
     const int maxChannels = quality < DYN_LOW_QUALITY ? 2 : 8;
     if (inChannelCount < 1
@@ -304,23 +305,6 @@
     mVolume[1] = u4_12_from_float(clampFloatVol(right));
 }
 
-void AudioResampler::setLocalTimeFreq(uint64_t freq) {
-    mLocalTimeFreq = freq;
-}
-
-void AudioResampler::setPTS(int64_t pts) {
-    mPTS = pts;
-}
-
-int64_t AudioResampler::calculateOutputPTS(int outputFrameIndex) {
-
-    if (mPTS == AudioBufferProvider::kInvalidPTS) {
-        return AudioBufferProvider::kInvalidPTS;
-    } else {
-        return mPTS + ((outputFrameIndex * mLocalTimeFreq) / mSampleRate);
-    }
-}
-
 void AudioResampler::reset() {
     mInputIndex = 0;
     mPhaseFraction = 0;
@@ -368,8 +352,7 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer,
-                                    calculateOutputPTS(outputIndex / 2));
+            provider->getNextBuffer(&mBuffer);
             if (mBuffer.raw == NULL) {
                 goto resampleStereo16_exit;
             }
@@ -465,8 +448,7 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer,
-                                    calculateOutputPTS(outputIndex / 2));
+            provider->getNextBuffer(&mBuffer);
             if (mBuffer.raw == NULL) {
                 mInputIndex = inputIndex;
                 mPhaseFraction = phaseFraction;
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index a8e3e6f..c4627e8 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -59,10 +59,6 @@
     virtual void init() = 0;
     virtual void setSampleRate(int32_t inSampleRate);
     virtual void setVolume(float left, float right);
-    virtual void setLocalTimeFreq(uint64_t freq);
-
-    // set the PTS of the next buffer output by the resampler
-    virtual void setPTS(int64_t pts);
 
     // Resample int16_t samples from provider and accumulate into 'out'.
     // A mono provider delivers a sequence of samples.
@@ -103,8 +99,6 @@
     AudioResampler(const AudioResampler&);
     AudioResampler& operator=(const AudioResampler&);
 
-    int64_t calculateOutputPTS(int outputFrameIndex);
-
     const int32_t mChannelCount;
     const int32_t mSampleRate;
     int32_t mInSampleRate;
@@ -117,8 +111,6 @@
     size_t mInputIndex;
     int32_t mPhaseIncrement;
     uint32_t mPhaseFraction;
-    uint64_t mLocalTimeFreq;
-    int64_t mPTS;
 
     // returns the inFrameCount required to generate outFrameCount frames.
     //
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 172c2a5..9c3c7cb 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -66,7 +66,7 @@
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
-        provider->getNextBuffer(&mBuffer, mPTS);
+        provider->getNextBuffer(&mBuffer);
         if (mBuffer.raw == NULL) {
             return 0;
         }
@@ -75,7 +75,6 @@
     int16_t *in = mBuffer.i16;
 
     while (outputIndex < outputSampleCount) {
-        int32_t sample;
         int32_t x;
 
         // calculate output sample
@@ -97,8 +96,7 @@
                 inputIndex = 0;
                 provider->releaseBuffer(&mBuffer);
                 mBuffer.frameCount = inFrameCount;
-                provider->getNextBuffer(&mBuffer,
-                                        calculateOutputPTS(outputIndex / 2));
+                provider->getNextBuffer(&mBuffer);
                 if (mBuffer.raw == NULL) {
                     goto save_state;  // ugly, but efficient
                 }
@@ -135,7 +133,7 @@
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
-        provider->getNextBuffer(&mBuffer, mPTS);
+        provider->getNextBuffer(&mBuffer);
         if (mBuffer.raw == NULL) {
             return 0;
         }
@@ -166,8 +164,7 @@
                 inputIndex = 0;
                 provider->releaseBuffer(&mBuffer);
                 mBuffer.frameCount = inFrameCount;
-                provider->getNextBuffer(&mBuffer,
-                                        calculateOutputPTS(outputIndex / 2));
+                provider->getNextBuffer(&mBuffer);
                 if (mBuffer.raw == NULL) {
                     goto save_state;  // ugly, but efficient
                 }
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
index 6481b85..e615700 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -282,7 +282,6 @@
         return;
     }
     int32_t oldSampleRate = mInSampleRate;
-    int32_t oldHalfNumCoefs = mConstants.mHalfNumCoefs;
     uint32_t oldPhaseWrapLimit = mConstants.mL << mConstants.mShift;
     bool useS32 = false;
 
@@ -527,8 +526,7 @@
         // We may not fetch a new buffer if the existing data is sufficient.
         while (mBuffer.frameCount == 0 && inFrameCount > 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer,
-                    calculateOutputPTS(outputIndex / OUTPUT_CHANNELS));
+            provider->getNextBuffer(&mBuffer);
             if (mBuffer.raw == NULL) {
                 goto resample_exit;
             }
diff --git a/services/audioflinger/AudioResamplerFirOps.h b/services/audioflinger/AudioResamplerFirOps.h
index 658285d..2a26496 100644
--- a/services/audioflinger/AudioResamplerFirOps.h
+++ b/services/audioflinger/AudioResamplerFirOps.h
@@ -26,11 +26,15 @@
 #endif
 
 #if defined(__aarch64__) || defined(__ARM_NEON__)
+#ifndef USE_NEON
 #define USE_NEON (true)
-#include <arm_neon.h>
+#endif
 #else
 #define USE_NEON (false)
 #endif
+#if USE_NEON
+#include <arm_neon.h>
+#endif
 
 template<typename T, typename U>
 struct is_same
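
Note: the change above turns USE_NEON from a defined/undefined switch into a macro that is always defined, to (true) or (false). That supports both preprocessor tests (#if USE_NEON) and ordinary expressions, and a build can pre-define it to force either path. The pattern reduced to a standalone sketch (filterWidth() and its widths are made up for illustration):

    #if defined(__aarch64__) || defined(__ARM_NEON__)
    #ifndef USE_NEON
    #define USE_NEON (true)    // pre-define to (false) to force the scalar path
    #endif
    #else
    #define USE_NEON (false)
    #endif

    #if USE_NEON
    #include <arm_neon.h>      // pulled in only when the NEON path is compiled
    #endif

    int filterWidth() {
        return USE_NEON ? 8 : 4;   // the value form also works in normal code
    }
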
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index 41730ee..320b8cf 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -43,10 +43,14 @@
 #endif
 
 #if defined(__aarch64__) || defined(__ARM_NEON__)
-#include <arm_neon.h>
-#define USE_NEON
+#ifndef USE_NEON
+#define USE_NEON (true)
+#endif
 #else
-#undef USE_NEON
+#define USE_NEON (false)
+#endif
+#if USE_NEON
+#include <arm_neon.h>
 #endif
 
 #define UNUSED(x) ((void)(x))
@@ -137,6 +141,8 @@
 
 // ----------------------------------------------------------------------------
 
+#if !USE_NEON
+
 static inline
 int32_t mulRL(int left, int32_t in, uint32_t vRL)
 {
@@ -198,6 +204,8 @@
 #endif
 }
 
+#endif // !USE_NEON
+
 // ----------------------------------------------------------------------------
 
 AudioResamplerSinc::AudioResamplerSinc(
@@ -301,8 +309,7 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer,
-                                    calculateOutputPTS(outputIndex / 2));
+            provider->getNextBuffer(&mBuffer);
             if (mBuffer.raw == NULL) {
                 goto resample_exit;
             }
@@ -418,7 +425,7 @@
 
     size_t count = offset;
 
-#ifndef USE_NEON
+#if !USE_NEON
     int32_t l = 0;
     int32_t r = 0;
     for (size_t i=0 ; i<count ; i++) {
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index b6d1be7..6026bbb 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -35,7 +35,7 @@
         , mFramesWrittenAtStandby(0)
         , mRenderPosition(0)
         , mRateMultiplier(1)
-        , mHalFormatIsLinearPcm(false)
+        , mHalFormatHasProportionalFrames(false)
         , mHalFrameSize(0)
 {
 }
@@ -96,7 +96,7 @@
 
     // Adjust for standby using HAL rate frames.
     // Only apply this correction if the HAL is getting PCM frames.
-    if (mHalFormatIsLinearPcm) {
+    if (mHalFormatHasProportionalFrames) {
         uint64_t adjustedPosition = (halPosition <= mFramesWrittenAtStandby) ?
                 0 : (halPosition - mFramesWrittenAtStandby);
         // Scale from HAL sample rate to application rate.
@@ -116,16 +116,21 @@
         const char *address)
 {
     audio_stream_out_t *outStream;
+
+    audio_output_flags_t customFlags = (config->format == AUDIO_FORMAT_IEC61937)
+                ? (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO)
+                : flags;
+
     int status = hwDev()->open_output_stream(
             hwDev(),
             handle,
             devices,
-            flags,
+            customFlags,
             config,
             &outStream,
             address);
-    ALOGV("AudioStreamOut::open(), HAL open_output_stream returned "
-            " %p, sampleRate %d, Format %#x, "
+    ALOGV("AudioStreamOut::open(), HAL returned "
+            "stream %p, sampleRate %d, Format %#x, "
             "channelMask %#x, status %d",
             outStream,
             config->sample_rate,
@@ -133,10 +138,26 @@
             config->channel_mask,
             status);
 
+    // Some HALs may not recognize AUDIO_FORMAT_IEC61937. But if we declare
+    // it as PCM then it will probably work.
+    if (status != NO_ERROR && config->format == AUDIO_FORMAT_IEC61937) {
+        struct audio_config customConfig = *config;
+        customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+
+        status = hwDev()->open_output_stream(
+                hwDev(),
+                handle,
+                devices,
+                customFlags,
+                &customConfig,
+                &outStream,
+                address);
+        ALOGV("AudioStreamOut::open(), treat IEC61937 as PCM, status = %d", status);
+    }
+
     if (status == NO_ERROR) {
         stream = outStream;
-        mHalFormatIsLinearPcm = audio_is_linear_pcm(config->format);
-        ALOGI("AudioStreamOut::open(), mHalFormatIsLinearPcm = %d", (int)mHalFormatIsLinearPcm);
+        mHalFormatHasProportionalFrames = audio_has_proportional_frames(config->format);
         mHalFrameSize = audio_stream_out_frame_size(stream);
     }
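
Note: the retry above is a fallback for HALs that do not recognize AUDIO_FORMAT_IEC61937: the data is framed like 16-bit PCM, so reopening as PCM (with the IEC958 non-audio flag still set in customFlags) usually works. A standalone sketch of the same open-with-fallback shape, with stand-in names (openStream, Config, the Format constants) rather than the HAL API:

    #include <cerrno>

    enum Format { FORMAT_IEC61937, FORMAT_PCM_16_BIT };
    struct Config { Format format; };

    // Stub standing in for the HAL open call: this fake HAL only accepts PCM.
    int openStream(const Config *config) {
        return config->format == FORMAT_PCM_16_BIT ? 0 : -EINVAL;
    }

    int openWithFallback(Config config) {
        int status = openStream(&config);
        if (status != 0 && config.format == FORMAT_IEC61937) {
            Config pcmConfig = config;            // retry, declaring the data as PCM
            pcmConfig.format = FORMAT_PCM_16_BIT;
            status = openStream(&pcmConfig);
        }
        return status;
    }

    int main() { return openWithFallback(Config{FORMAT_IEC61937}); }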
 
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index 06a2277..768f537 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -106,7 +106,7 @@
     uint64_t             mFramesWrittenAtStandby;
     uint64_t             mRenderPosition; // reset by flush or standby
     int                  mRateMultiplier;
-    bool                 mHalFormatIsLinearPcm;
+    bool                 mHalFormatHasProportionalFrames;
     size_t               mHalFrameSize;
 };
 
diff --git a/services/audioflinger/AutoPark.h b/services/audioflinger/AutoPark.h
new file mode 100644
index 0000000..e539e47
--- /dev/null
+++ b/services/audioflinger/AutoPark.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace android {
+
+// T is FastMixer or FastCapture
+template<typename T> class AutoPark {
+public:
+
+    // Park the specific FastThread, which can be nullptr, in hot idle if not currently idling
+    AutoPark(const sp<T>& fastThread) : mFastThread(fastThread)
+    {
+        mPreviousCommand = FastThreadState::HOT_IDLE;
+        if (fastThread != nullptr) {
+            auto sq = mFastThread->sq();
+            FastThreadState *state = sq->begin();
+            if (!(state->mCommand & FastThreadState::IDLE)) {
+                mPreviousCommand = state->mCommand;
+                state->mCommand = FastThreadState::HOT_IDLE;
+                sq->end();
+                sq->push(sq->BLOCK_UNTIL_ACKED);
+            } else {
+                sq->end(false /*didModify*/);
+            }
+        }
+    }
+
+    // Remove the FastThread from hot idle if necessary
+    ~AutoPark()
+    {
+        if (!(mPreviousCommand & FastThreadState::IDLE)) {
+            ALOG_ASSERT(mFastThread != nullptr);
+            auto sq = mFastThread->sq();
+            FastThreadState *state = sq->begin();
+            ALOG_ASSERT(state->mCommand == FastThreadState::HOT_IDLE);
+            state->mCommand = mPreviousCommand;
+            sq->end();
+            sq->push(sq->BLOCK_UNTIL_PUSHED);
+        }
+    }
+
+private:
+    const sp<T>                 mFastThread;
+    // if !&IDLE, holds the FastThread state to restore after new parameters processed
+    FastThreadState::Command    mPreviousCommand;
+};  // class AutoPark
+
+}   // namespace
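
Note: a hypothetical caller of the new AutoPark class (reconfigure() below is illustrative, not AOSP code). The constructor pushes HOT_IDLE and blocks until acknowledged, and the destructor restores the saved command, so the fast thread stays parked for exactly the enclosing scope:

    void reconfigure(const sp<FastMixer>& fastMixer) {
        AutoPark<FastMixer> park(fastMixer);   // no-op if null or already idling
        // ... safely modify state the fast thread must not observe mid-change ...
    }   // ~AutoPark() restores the previous command and pushes it
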
diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp
index a8be206..2ca2cac 100644
--- a/services/audioflinger/BufferProviders.cpp
+++ b/services/audioflinger/BufferProviders.cpp
@@ -70,13 +70,12 @@
     free(mLocalBufferData);
 }
 
-status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts)
+status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer)
 {
-    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
-    //        this, pBuffer, pBuffer->frameCount, pts);
+    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu))",
+    //        this, pBuffer, pBuffer->frameCount);
     if (mLocalBufferFrameCount == 0) {
-        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer);
         if (res == OK) {
             copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
         }
@@ -84,7 +83,7 @@
     }
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = pBuffer->frameCount;
-        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer);
         // At one time an upstream buffer provider had
         // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
         //
@@ -356,13 +355,13 @@
 }
 
 status_t TimestretchBufferProvider::getNextBuffer(
-        AudioBufferProvider::Buffer *pBuffer, int64_t pts)
+        AudioBufferProvider::Buffer *pBuffer)
 {
-    ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
-            this, pBuffer, pBuffer->frameCount, pts);
+    ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu))",
+            this, pBuffer, pBuffer->frameCount);
 
     // BYPASS
-    //return mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+    //return mTrackBufferProvider->getNextBuffer(pBuffer);
 
     // check if previously processed data is sufficient.
     if (pBuffer->frameCount <= mRemaining) {
@@ -391,7 +390,7 @@
         mBuffer.frameCount = mPlaybackRate.mSpeed == AUDIO_TIMESTRETCH_SPEED_NORMAL
                 ? outputDesired : outputDesired * mPlaybackRate.mSpeed + 1;
 
-        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer);
 
         ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
         if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
diff --git a/services/audioflinger/BufferProviders.h b/services/audioflinger/BufferProviders.h
index 4bc895c..abd43c6 100644
--- a/services/audioflinger/BufferProviders.h
+++ b/services/audioflinger/BufferProviders.h
@@ -64,7 +64,7 @@
     virtual ~CopyBufferProvider();
 
     // Overrides AudioBufferProvider methods
-    virtual status_t getNextBuffer(Buffer *buffer, int64_t pts);
+    virtual status_t getNextBuffer(Buffer *buffer);
     virtual void releaseBuffer(Buffer *buffer);
 
     // Overrides PassthruBufferProvider
@@ -156,7 +156,7 @@
     virtual ~TimestretchBufferProvider();
 
     // Overrides AudioBufferProvider methods
-    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
+    virtual status_t getNextBuffer(Buffer* buffer);
     virtual void releaseBuffer(Buffer* buffer);
 
     // Overrides PassthruBufferProvider
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index eb52dee..055e915 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -59,7 +59,7 @@
                                         const wp<AudioFlinger::EffectChain>& chain,
                                         effect_descriptor_t *desc,
                                         int id,
-                                        int sessionId)
+                                        audio_session_t sessionId)
     : mPinned(sessionId > AUDIO_SESSION_OUTPUT_MIX),
       mThread(thread), mChain(chain), mId(id), mSessionId(sessionId),
       mDescriptor(*desc),
@@ -138,7 +138,7 @@
     } else {
         status = ALREADY_EXISTS;
     }
-    ALOGV("addHandle() %p added handle %p in position %d", this, handle, i);
+    ALOGV("addHandle() %p added handle %p in position %zu", this, handle, i);
     mHandles.insertAt(handle, i);
     return status;
 }
@@ -156,7 +156,7 @@
     if (i == size) {
         return size;
     }
-    ALOGV("removeHandle() %p removed handle %p in position %d", this, handle, i);
+    ALOGV("removeHandle() %p removed handle %p in position %zu", this, handle, i);
 
     mHandles.removeAt(i);
     // if removed from first place, move effect control from this handle to next in line
@@ -380,7 +380,7 @@
     mConfig.inputCfg.buffer.frameCount = thread->frameCount();
     mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
 
-    ALOGV("configure() %p thread %p buffer %p framecount %d",
+    ALOGV("configure() %p thread %p buffer %p framecount %zu",
             this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
 
     status_t cmdStatus;
@@ -683,7 +683,6 @@
     if (isProcessEnabled() &&
             ((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
             (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
-        status_t cmdStatus;
         uint32_t volume[2];
         uint32_t *pVolume = NULL;
         uint32_t size = sizeof(volume);
@@ -940,7 +939,7 @@
 
     int len = s.length();
     if (s.length() > 2) {
-        char *str = s.lockBuffer(len);
+        (void) s.lockBuffer(len);
         s.unlockBuffer(len - 2);
     }
     return s;
@@ -1057,7 +1056,7 @@
     mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset);
     if (mCblkMemory == 0 ||
             (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer())) == NULL) {
-        ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE +
+        ALOGE("not enough memory for Effect size=%zu", EFFECT_PARAM_BUFFER_SIZE +
                 sizeof(effect_param_cblk_t));
         mCblkMemory.clear();
         return;
@@ -1347,7 +1346,7 @@
 #define LOG_TAG "AudioFlinger::EffectChain"
 
 AudioFlinger::EffectChain::EffectChain(ThreadBase *thread,
-                                        int sessionId)
+                                        audio_session_t sessionId)
     : mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
       mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
       mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX), mForceVolume(false)
@@ -1586,7 +1585,7 @@
         }
         mEffects.insertAt(effect, idx_insert);
 
-        ALOGV("addEffect_l() effect %p, added in chain %p at rank %d", effect.get(), this,
+        ALOGV("addEffect_l() effect %p, added in chain %p at rank %zu", effect.get(), this,
                 idx_insert);
     }
     effect->configure();
@@ -1618,7 +1617,7 @@
                 }
             }
             mEffects.removeAt(i);
-            ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %d", effect.get(),
+            ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
                     this, i);
             break;
         }
@@ -1733,7 +1732,7 @@
     String8 result;
 
     size_t numEffects = mEffects.size();
-    snprintf(buffer, SIZE, "    %d effects for session %d\n", numEffects, mSessionId);
+    snprintf(buffer, SIZE, "    %zu effects for session %d\n", numEffects, mSessionId);
     result.append(buffer);
 
     if (numEffects) {
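
Note: several hunks above correct printf-style conversions to match the argument types (%zu for size_t, %d only for int); a mismatch is undefined behavior and becomes a hard error under -Wformat with -Werror. A minimal illustration:

    #include <cstddef>
    #include <cstdio>

    int main() {
        size_t numEffects = 3;
        // printf("%d effects\n", numEffects);   // wrong: %d expects int
        printf("%zu effects\n", numEffects);     // %zu is the size_t conversion
        return 0;
    }
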
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 6f93f81..bc9bc94 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -45,7 +45,7 @@
                     const wp<AudioFlinger::EffectChain>& chain,
                     effect_descriptor_t *desc,
                     int id,
-                    int sessionId);
+                    audio_session_t sessionId);
     virtual ~EffectModule();
 
     enum effect_state {
@@ -76,7 +76,7 @@
     uint32_t status() {
         return mStatus;
     }
-    int sessionId() const {
+    audio_session_t sessionId() const {
         return mSessionId;
     }
     status_t    setEnabled(bool enabled);
@@ -141,7 +141,7 @@
     wp<ThreadBase>      mThread;    // parent thread
     wp<EffectChain>     mChain;     // parent effect chain
     const int           mId;        // this instance unique ID
-    const int           mSessionId; // audio session ID
+    const audio_session_t mSessionId; // audio session ID
     const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
     effect_config_t     mConfig;    // input and output audio configuration
     effect_handle_t  mEffectInterface; // Effect module C API
@@ -235,15 +235,17 @@
 
 // the EffectChain class represents a group of effects associated to one audio session.
 // There can be any number of EffectChain objects per output mixer thread (PlaybackThread).
-// The EffecChain with session ID 0 contains global effects applied to the output mix.
+// The EffectChain with session ID AUDIO_SESSION_OUTPUT_MIX contains global effects applied
+// to the output mix.
 // Effects in this chain can be insert or auxiliary. Effects in other chains (attached to
 // tracks) are insert only. The EffectChain maintains an ordered list of effect module, the
-// order corresponding in the effect process order. When attached to a track (session ID != 0),
+// order corresponding to the effect process order. When attached to a track (session ID !=
+// AUDIO_SESSION_OUTPUT_MIX),
 // it also provides its own input buffer used by the track as an accumulation buffer.
 class EffectChain : public RefBase {
 public:
-    EffectChain(const wp<ThreadBase>& wThread, int sessionId);
-    EffectChain(ThreadBase *thread, int sessionId);
+    EffectChain(const wp<ThreadBase>& wThread, audio_session_t sessionId);
+    EffectChain(ThreadBase *thread, audio_session_t sessionId);
     virtual ~EffectChain();
 
     // special key used for an entry in mSuspendedEffects keyed vector
@@ -266,8 +268,8 @@
     status_t addEffect_l(const sp<EffectModule>& handle);
     size_t removeEffect_l(const sp<EffectModule>& handle);
 
-    int sessionId() const { return mSessionId; }
-    void setSessionId(int sessionId) { mSessionId = sessionId; }
+    audio_session_t sessionId() const { return mSessionId; }
+    void setSessionId(audio_session_t sessionId) { mSessionId = sessionId; }
 
     sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor);
     sp<EffectModule> getEffectFromId_l(int id);
@@ -362,7 +364,7 @@
     wp<ThreadBase> mThread;     // parent mixer thread
     Mutex mLock;                // mutex protecting effect list
     Vector< sp<EffectModule> > mEffects; // list of effect modules
-    int mSessionId;             // audio session ID
+    audio_session_t mSessionId; // audio session ID
     int16_t *mInBuffer;         // chain input buffer
     int16_t *mOutBuffer;        // chain output buffer
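
Note: the int to audio_session_t migration above gives session IDs a distinct type so the compiler can flag accidental mixing with ordinary integers. A simplified model (the real definition lives in system/audio.h; this enum body is an assumption for illustration):

    typedef enum {
        AUDIO_SESSION_OUTPUT_MIX = 0,   // chain of global effects on the output mix
        // positive values: per-client sessions allocated by the framework
    } audio_session_t;

    // Mirrors the mPinned initializer above: only a real session pins an effect.
    bool isPinned(audio_session_t sessionId) {
        return sessionId > AUDIO_SESSION_OUTPUT_MIX;
    }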
 
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index 1bba5f6..d202169 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -104,8 +104,10 @@
         } else {
             mFormat = mInputSource->format();
             mSampleRate = Format_sampleRate(mFormat);
+#if !LOG_NDEBUG
             unsigned channelCount = Format_channelCount(mFormat);
             ALOG_ASSERT(channelCount >= 1 && channelCount <= FCC_8);
+#endif
         }
         dumpState->mSampleRate = mSampleRate;
         eitherChanged = true;
@@ -166,8 +168,7 @@
         ALOG_ASSERT(mReadBuffer != NULL);
         dumpState->mReadSequence++;
         ATRACE_BEGIN("read");
-        ssize_t framesRead = mInputSource->read(mReadBuffer, frameCount,
-                AudioBufferProvider::kInvalidPTS);
+        ssize_t framesRead = mInputSource->read(mReadBuffer, frameCount);
         ATRACE_END();
         dumpState->mReadSequence++;
         if (framesRead >= 0) {
@@ -187,7 +188,6 @@
         ALOG_ASSERT(mPipeSink != NULL);
         ALOG_ASSERT(mReadBuffer != NULL);
         if (mReadBufferState < 0) {
-            unsigned channelCount = Format_channelCount(mFormat);
             memset(mReadBuffer, 0, frameCount * Format_frameSize(mFormat));
             mReadBufferState = frameCount;
         }
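
Note: the #if !LOG_NDEBUG guard above covers a variable that exists only to feed ALOG_ASSERT: on release builds the assert compiles away and the variable would trip -Wunused-variable under -Werror. The generic shape of the idiom, using standard assert/NDEBUG as a stand-in:

    #include <cassert>

    void checkChannels(int channelCount) {
    #ifndef NDEBUG
        // used only by the assert; guarded so release builds see no unused variable
        const bool valid = channelCount >= 1 && channelCount <= 8;
        assert(valid);
    #endif
        (void)channelCount;   // keeps the parameter used when NDEBUG is defined
    }
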
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 45c68b5..01f3939 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -37,18 +37,16 @@
 #include <cpustats/ThreadCpuUsage.h>
 #endif
 #endif
+#include <audio_utils/conversion.h>
 #include <audio_utils/format.h>
 #include "AudioMixer.h"
 #include "FastMixer.h"
 
-#define FCC_2                       2   // fixed channel count assumption
-
 namespace android {
 
 /*static*/ const FastMixerState FastMixer::sInitial;
 
 FastMixer::FastMixer() : FastThread(),
-    mSlopNs(0),
     // mFastTrackNames
     // mGenerations
     mOutputSink(NULL),
@@ -66,7 +64,8 @@
     mFastTracksGen(0),
     mTotalNativeFramesWritten(0),
     // timestamp
-    mNativeFramesWrittenButNotPresented(0)   // the = 0 is to silence the compiler
+    mNativeFramesWrittenButNotPresented(0),   // the = 0 is to silence the compiler
+    mMasterMono(false)
 {
     // FIXME pass sInitial as parameter to base class constructor, and make it static local
     mPrevious = &sInitial;
@@ -78,7 +77,7 @@
     mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
 
     unsigned i;
-    for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
+    for (i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
         mFastTrackNames[i] = -1;
         mGenerations[i] = 0;
     }
@@ -141,6 +140,10 @@
     FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
     const size_t frameCount = current->mFrameCount;
 
+    // update boottime offset, in case it has changed
+    mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
+            mBoottimeOffset.load();
+
     // handle state change here, but since we want to diff the state,
     // we're prepared for previous == &sInitial the first time through
     unsigned previousTrackMask;
@@ -184,7 +187,7 @@
             // FIXME new may block for unbounded time at internal mutex of the heap
             //       implementation; it would be better to have normal mixer allocate for us
             //       to avoid blocking here and to prevent possible priority inversion
-            mMixer = new AudioMixer(frameCount, mSampleRate, FastMixerState::kMaxFastTracks);
+            mMixer = new AudioMixer(frameCount, mSampleRate, FastMixerState::sMaxFastTracks);
             const size_t mixerFrameSize = mSinkChannelCount
                     * audio_bytes_per_sample(mMixerBufferFormat);
             mMixerBufferSize = mixerFrameSize * frameCount;
@@ -211,7 +214,7 @@
         }
         mMixerBufferState = UNDEFINED;
 #if !LOG_NDEBUG
-        for (unsigned i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
+        for (unsigned i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
             mFastTrackNames[i] = -1;
         }
 #endif
@@ -334,6 +337,11 @@
 
     if ((command & FastMixerState::MIX) && (mMixer != NULL) && mIsWarm) {
         ALOG_ASSERT(mMixerBuffer != NULL);
+
+        // AudioMixer::mState.enabledTracks is undefined if mState.hook == process__validate,
+        // so we keep a side copy of enabledTracks
+        bool anyEnabledTracks = false;
+
         // for each track, update volume and check for underrun
         unsigned currentTrackMask = current->mTrackMask;
         while (currentTrackMask != 0) {
@@ -341,21 +349,23 @@
             currentTrackMask &= ~(1 << i);
             const FastTrack* fastTrack = &current->mFastTracks[i];
 
-            // Refresh the per-track timestamp
-            if (mTimestampStatus == NO_ERROR) {
-                uint32_t trackFramesWrittenButNotPresented =
-                    mNativeFramesWrittenButNotPresented;
-                uint32_t trackFramesWritten = fastTrack->mBufferProvider->framesReleased();
-                // Can't provide an AudioTimestamp before first frame presented,
-                // or during the brief 32-bit wraparound window
-                if (trackFramesWritten >= trackFramesWrittenButNotPresented) {
-                    AudioTimestamp perTrackTimestamp;
-                    perTrackTimestamp.mPosition =
-                            trackFramesWritten - trackFramesWrittenButNotPresented;
-                    perTrackTimestamp.mTime = mTimestamp.mTime;
-                    fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp);
-                }
+            const int64_t trackFramesWrittenButNotPresented =
+                mNativeFramesWrittenButNotPresented;
+            const int64_t trackFramesWritten = fastTrack->mBufferProvider->framesReleased();
+            ExtendedTimestamp perTrackTimestamp(mTimestamp);
+
+            // Can't provide an ExtendedTimestamp before first frame presented.
+            // Also, timestamp may not go to very last frame on stop().
+            if (trackFramesWritten >= trackFramesWrittenButNotPresented &&
+                    perTrackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0) {
+                perTrackTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+                        trackFramesWritten - trackFramesWrittenButNotPresented;
+            } else {
+                perTrackTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
+                perTrackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
             }
+            perTrackTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = trackFramesWritten;
+            fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp);
 
             int name = mFastTrackNames[i];
             ALOG_ASSERT(name >= 0);
@@ -392,24 +402,26 @@
                     underruns.mBitFields.mPartial++;
                     underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
                     mMixer->enable(name);
+                    anyEnabledTracks = true;
                 }
             } else {
                 underruns.mBitFields.mFull++;
                 underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
                 mMixer->enable(name);
+                anyEnabledTracks = true;
             }
             ftDump->mUnderruns = underruns;
             ftDump->mFramesReady = framesReady;
         }
 
-        int64_t pts;
-        if (mOutputSink == NULL || (OK != mOutputSink->getNextWriteTimestamp(&pts))) {
-            pts = AudioBufferProvider::kInvalidPTS;
+        if (anyEnabledTracks) {
+            // process() is CPU-bound
+            mMixer->process();
+            mMixerBufferState = MIXED;
+        } else if (mMixerBufferState != ZEROED) {
+            mMixerBufferState = UNDEFINED;
         }
 
-        // process() is CPU-bound
-        mMixer->process(pts);
-        mMixerBufferState = MIXED;
     } else if (mMixerBufferState == MIXED) {
         mMixerBufferState = UNDEFINED;
     }
@@ -419,6 +431,11 @@
             memset(mMixerBuffer, 0, mMixerBufferSize);
             mMixerBufferState = ZEROED;
         }
+
+        if (mMasterMono.load()) {  // memory_order_seq_cst
+            mono_blend(mMixerBuffer, mMixerBufferFormat, Format_channelCount(mFormat), frameCount,
+                    true /*limit*/);
+        }
         // prepare the buffer used to write to sink
         void *buffer = mSinkBuffer != NULL ? mSinkBuffer : mMixerBuffer;
         if (mFormat.mFormat != mMixerBufferFormat) { // sink format not the same as mixer format
@@ -450,17 +467,36 @@
         mAttemptedWrite = true;
         // FIXME count # of writes blocked excessively, CPU usage, etc. for dump
 
-        mTimestampStatus = mOutputSink->getTimestamp(mTimestamp);
-        if (mTimestampStatus == NO_ERROR) {
-            uint32_t totalNativeFramesPresented = mTimestamp.mPosition;
+        ExtendedTimestamp timestamp; // local
+        status_t status = mOutputSink->getTimestamp(timestamp);
+        if (status == NO_ERROR) {
+            const int64_t totalNativeFramesPresented =
+                    timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
             if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
                 mNativeFramesWrittenButNotPresented =
                     mTotalNativeFramesWritten - totalNativeFramesPresented;
+                mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+                        timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+                        timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
             } else {
                 // HAL reported that more frames were presented than were written
-                mTimestampStatus = INVALID_OPERATION;
+                mNativeFramesWrittenButNotPresented = 0;
+                status = INVALID_OPERATION;
             }
         }
+        if (status == NO_ERROR) {
+            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+                    mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+        } else {
+            // fetch server time if we can't get timestamp
+            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+                    systemTime(SYSTEM_TIME_MONOTONIC);
+            // clear out kernel cached position as this may get rapidly stale
+            // if we never get a new valid timestamp
+            mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
+            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+        }
     }
 }
 
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 06a68fb..bdfd8a0 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -17,6 +17,7 @@
 #ifndef ANDROID_AUDIO_FAST_MIXER_H
 #define ANDROID_AUDIO_FAST_MIXER_H
 
+#include <atomic>
 #include "FastThread.h"
 #include "StateQueue.h"
 #include "FastMixerState.h"
@@ -36,6 +37,10 @@
 
             FastMixerStateQueue* sq();
 
+    virtual void setMasterMono(bool mono) { mMasterMono.store(mono); /* memory_order_seq_cst */ }
+    virtual void setBoottimeOffset(int64_t boottimeOffset) {
+        mBoottimeOffset.store(boottimeOffset); /* memory_order_seq_cst */
+    }
 private:
             FastMixerStateQueue mSQ;
 
@@ -52,7 +57,6 @@
     static const FastMixerState sInitial;
 
     FastMixerState  mPreIdle;   // copy of state before we went into idle
-    long            mSlopNs;    // accumulated time we've woken up too early (> 0) or too late (< 0)
     int             mFastTrackNames[FastMixerState::kMaxFastTracks];
                                 // handles used by mixer to identify tracks
     int             mGenerations[FastMixerState::kMaxFastTracks];
@@ -76,12 +80,15 @@
     unsigned        mSampleRate;
     int             mFastTracksGen;
     FastMixerDumpState mDummyFastMixerDumpState;
-    uint32_t        mTotalNativeFramesWritten;  // copied to dumpState->mFramesWritten
+    int64_t         mTotalNativeFramesWritten;  // copied to dumpState->mFramesWritten
 
     // next 2 fields are valid only when timestampStatus == NO_ERROR
-    AudioTimestamp  mTimestamp;
-    uint32_t        mNativeFramesWrittenButNotPresented;
+    ExtendedTimestamp mTimestamp;
+    int64_t         mNativeFramesWrittenButNotPresented;
 
+    // accessed without lock between multiple threads.
+    std::atomic_bool mMasterMono;
+    std::atomic_int_fast64_t mBoottimeOffset;
 };  // class FastMixer
 
 }   // namespace android
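
Note: mMasterMono and mBoottimeOffset above use the lock-free parameter-passing pattern: the control thread stores with the default memory_order_seq_cst and the fast thread polls each cycle, so the hot path never takes a lock. A self-contained sketch (Mixer and onWork() are placeholder names):

    #include <atomic>
    #include <cstdint>

    class Mixer {
    public:
        // called from the normal mixer thread; store defaults to seq_cst
        void setMasterMono(bool mono) { mMasterMono.store(mono); }
        void setBoottimeOffset(int64_t offset) { mBoottimeOffset.store(offset); }

        // called once per cycle on the fast thread
        void onWork() {
            if (mMasterMono.load()) {
                // ... blend the mix buffer down to mono ...
            }
            const int64_t offset = mBoottimeOffset.load();
            (void)offset;   // would be copied into the timestamp timebase offset
        }

    private:
        std::atomic_bool mMasterMono{false};
        std::atomic_int_fast64_t mBoottimeOffset{0};
    };
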
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index b10942b..2326e2a 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -166,10 +166,10 @@
     // Instead we always display all tracks, with an indication
     // of whether we think the track is active.
     uint32_t trackMask = mTrackMask;
-    dprintf(fd, "  Fast tracks: kMaxFastTracks=%u activeMask=%#x\n",
-            FastMixerState::kMaxFastTracks, trackMask);
+    dprintf(fd, "  Fast tracks: sMaxFastTracks=%u activeMask=%#x\n",
+            FastMixerState::sMaxFastTracks, trackMask);
     dprintf(fd, "  Index Active Full Partial Empty  Recent Ready\n");
-    for (uint32_t i = 0; i < FastMixerState::kMaxFastTracks; ++i, trackMask >>= 1) {
+    for (uint32_t i = 0; i < FastMixerState::sMaxFastTracks; ++i, trackMask >>= 1) {
         bool isActive = trackMask & 1;
         const FastTrackDump *ftDump = &mTracks[i];
         const FastTrackUnderruns& underruns = ftDump->mUnderruns;
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index a8c2634..ad471fb 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include <cutils/properties.h>
 #include "FastMixerState.h"
 
 namespace android {
@@ -33,6 +34,10 @@
     mFastTracksGen(0), mTrackMask(0), mOutputSink(NULL), mOutputSinkGen(0),
     mFrameCount(0), mTeeSink(NULL)
 {
+    int ok = pthread_once(&sMaxFastTracksOnce, sMaxFastTracksInit);
+    if (ok != 0) {
+        ALOGE("%s pthread_once failed: %d", __func__, ok);
+    }
 }
 
 FastMixerState::~FastMixerState()
@@ -40,6 +45,12 @@
 }
 
 // static
+unsigned FastMixerState::sMaxFastTracks = kDefaultFastTracks;
+
+// static
+pthread_once_t FastMixerState::sMaxFastTracksOnce = PTHREAD_ONCE_INIT;
+
+// static
 const char *FastMixerState::commandToString(Command command)
 {
     const char *str = FastThreadState::commandToString(command);
@@ -54,4 +65,18 @@
     LOG_ALWAYS_FATAL("%s", __func__);
 }
 
+// static
+void FastMixerState::sMaxFastTracksInit()
+{
+    char value[PROPERTY_VALUE_MAX];
+    if (property_get("ro.audio.max_fast_tracks", value, NULL) > 0) {
+        char *endptr;
+        unsigned long ul = strtoul(value, &endptr, 0);
+        if (*endptr == '\0' && kMinFastTracks <= ul && ul <= kMaxFastTracks) {
+            sMaxFastTracks = (unsigned) ul;
+        }
+    }
+    ALOGI("sMaxFastTracks = %u", sMaxFastTracks);
+}
+
 }   // namespace android
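
Note: sMaxFastTracksInit() above is a once-guarded, property-driven initializer with a validate-and-clamp parse. A standalone sketch of the same shape; getenv stands in for property_get so it runs off-device, and the 2/8/32 bounds mirror kMinFastTracks/kDefaultFastTracks/kMaxFastTracks:

    #include <cstdlib>
    #include <pthread.h>

    static unsigned sMaxFastTracks = 8;             // default
    static pthread_once_t sOnce = PTHREAD_ONCE_INIT;

    static void initMaxFastTracks() {
        const char *value = getenv("MAX_FAST_TRACKS");  // stand-in for property_get
        if (value != nullptr) {
            char *endptr;
            unsigned long ul = strtoul(value, &endptr, 0);
            if (*endptr == '\0' && 2 <= ul && ul <= 32) {   // reject junk, bound range
                sMaxFastTracks = (unsigned) ul;
            }
        }
    }

    unsigned maxFastTracks() {
        pthread_once(&sOnce, initMaxFastTracks);    // runs the parse exactly once
        return sMaxFastTracks;
    }
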
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 916514f..5a55c7a 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -54,7 +54,13 @@
                 FastMixerState();
     /*virtual*/ ~FastMixerState();
 
-    static const unsigned kMaxFastTracks = 8;   // must be between 2 and 32 inclusive
+    // These are the minimum, maximum, and default values for maximum number of fast tracks
+    static const unsigned kMinFastTracks = 2;
+    static const unsigned kMaxFastTracks = 32;
+    static const unsigned kDefaultFastTracks = 8;
+
+    static unsigned sMaxFastTracks;             // Configured maximum number of fast tracks
+    static pthread_once_t sMaxFastTracksOnce;   // Protects initializer for sMaxFastTracks
 
     // all pointer fields use raw pointers; objects are owned and ref-counted by the normal mixer
     FastTrack   mFastTracks[kMaxFastTracks];
@@ -76,6 +82,10 @@
 
     // never returns NULL; asserts if command is invalid
     static const char *commandToString(Command command);
+
+    // initialize sMaxFastTracks
+    static void sMaxFastTracksInit();
+
 };  // struct FastMixerState
 
 }   // namespace android
diff --git a/services/audioflinger/LinearMap.h b/services/audioflinger/LinearMap.h
new file mode 100644
index 0000000..fca14dd
--- /dev/null
+++ b/services/audioflinger/LinearMap.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_LINEAR_MAP_H
+#define ANDROID_LINEAR_MAP_H
+
+#include <stdint.h>
+
+namespace android {
+
+/*
+A general purpose lookup utility that defines a mapping between X and Y as a
+continuous set of line segments with shared (x, y) end-points.
+The (x, y) points must be added in order, monotonically increasing in both x and y;
+a log warning is emitted if this does not happen (See general usage notes below).
+
+A limited history of (x, y) points is kept for space reasons (See general usage notes).
+
+In AudioFlinger, we use the LinearMap to associate track frames to
+sink frames.  When we want to obtain a client track timestamp, we first
+get a timestamp from the sink.  The sink timestamp's position (mPosition)
+corresponds to the sink frames written. We use LinearMap to figure out which track frame
+the sink frame corresponds to. This allows us to substitute a track frame for the
+sink frame (keeping the mTime identical) and return that timestamp back to the client.
+
+The method findX() can be used to retrieve an x value from a given y value and is
+used for timestamps, similarly for findY() which is provided for completeness.
+
+We update the (track frame, sink frame) points in the LinearMap each time we write data
+to the sink by the AudioFlinger PlaybackThread (MixerThread).
+
+
+AudioFlinger Timestamp Notes:
+
+1) Example: Obtaining a track timestamp during playback.  In this case, the LinearMap
+looks something like this:
+
+Track Frame    Sink Frame
+(track start)
+0              50000  (track starts here, the sink may already be running)
+1000           51000
+2000           52000
+
+When we request a track timestamp, we call the sink getTimestamp() and get for example
+mPosition = 51020.  Using the LinearMap, we find we have played to track frame 1020.
+We substitute the sink mPosition of 51020 with the track position 1020,
+and return that timestamp to the app.
+
+2) Example: Obtaining a track timestamp during pause. In this case, the LinearMap
+looks something like this:
+
+Track Frame    Sink Frame
+... (some time has gone by)
+15000          30000
+16000          31000
+17000          32000
+(pause here)
+(suppose we call sink getTimestamp() here and get sink mPosition = 31100; that means
+        we have played to track frame 16100.  The track timestamp mPosition will
+        continue to advance until the sink timestamp returns a value of mPosition
+        greater than 32000, corresponding to track frame 17000 when the pause was called).
+17000          33000
+17000          34000
+...
+
+3) If the track underruns, it appears as if a pause was called on that track.
+
+4) If there is an underrun in the HAL layer, then it may be possible that
+the sink getTimestamp() will return a value greater than the number of frames written
+(it should always be less). This should be rare, if not impossible, for some
+HAL implementations of the sink getTimestamp. In that case, timing is lost
+and we will return the most recent track frame written.
+
+5) When called with no points in the map, findX() returns the start value (default 0).
+This is consistent with starting after a stop() or flush().
+
+6) Resuming after Track standby will be similar to coming out of pause, as the HAL ensures
+framesWritten() and getTimestamp() are contiguous for non-offloaded/direct tracks.
+
+7) LinearMap works for different speeds and sample rates as it uses
+linear interpolation. Since AudioFlinger only updates speed and sample rate
+exactly at the sample points pushed into the LinearMap, the returned values
+from findX() and findY() are accurate regardless of how many speed or sample
+rate changes are made, so long as the coordinate looked up is within the
+sample history.
+
+General usage notes:
+
+1) In order for the LinearMap to work reliably, you cannot look backwards more
+than the size of its circular buffer history, set upon creation (typically 16).
+If you look back further, the position is extrapolated either from a passed in
+extrapolation parameter or from the oldest line segment.
+
+2) Points must monotonically increase in x and y. The increment between adjacent
+points cannot be greater than signed 32 bits. Wrap in the x, y coordinates are supported,
+since we use differences in our computation.
+
+3) If the frame data is discontinuous (due to stop or flush) call reset() to clear
+the sample counter.
+
+4) If (x, y) are not strictly monotonic increasing, i.e. (x2 > x1) and (y2 > y1),
+then one or both of the inverses y = f(x) or x = g(y) may have multiple solutions.
+In that case, the most recent solution is returned by findX() or findY().  We
+do not warn if (x2 == x1) or (y2 == y1), but we do logcat warn if (x2 < x1) or
+(y2 < y1).
+
+5) Due to rounding it is possible x != findX(findY(x)) or y != findY(findX(y))
+even when the inverse exists. Nevertheless, the values should be close.
+
+*/
+
+template <typename T>
+class LinearMap {
+public:
+    // This enumeration describes the reliability of the findX() or findY() estimation
+    // in descending order.
+    enum FindMethod {
+        FIND_METHOD_INTERPOLATION,           // High reliability (errors due to rounding)
+        FIND_METHOD_FORWARD_EXTRAPOLATION,   // Reliability based on no future speed changes
+        FIND_METHOD_BACKWARD_EXTRAPOLATION,  // Reliability based on prior estimated speed
+        FIND_METHOD_START_VALUE,             // No samples in history, using start value
+    };
+
+    LinearMap(size_t size)
+            : mSize(size),
+              mPos(0), // a circular buffer, so could start anywhere. The first sample is at 1.
+              mSamples(0),
+              // mStepValid(false),      // only valid if mSamples > 1
+              // mExtrapolateTail(false), // only valid if mSamples > 0
+              mX(new T[size]),
+              mY(new T[size]) { }
+
+    ~LinearMap() {
+        delete[] mX;
+        delete[] mY;
+    }
+
+    // Add a new sample point to the linear map.
+    //
+    // The difference between the new sample and the previous sample
+    // in the x or y coordinate must be less than INT32_MAX for purposes
+    // of the linear interpolation or extrapolation.
+    //
+    // The value should be monotonic increasing (e.g. diff >= 0);
+    // logcat warnings are issued if they are not.
+    __attribute__((no_sanitize("integer")))
+    void push(T x, T y) {
+        // Assumption: we assume x, y are monotonic increasing values,
+        // which (can) wrap in precision no less than 32 bits and have
+        // "step" or differences between adjacent points less than 32 bits.
+
+        if (mSamples > 0) {
+            const bool lastStepValid = mStepValid;
+            int32_t xdiff;
+            int32_t ydiff;
+            // check difference assumption here
+            mStepValid = checkedDiff(&xdiff, x, mX[mPos], "x")
+                    & /* bitwise AND to always warn for ydiff, though logical AND is also OK */
+                    checkedDiff(&ydiff, y, mY[mPos], "y");
+
+            // Optimization: do not add a new sample if the line segment would
+            // simply extend the previous line segment.  This extends the useful
+            // history by removing redundant points.
+            if (mSamples > 1 && mStepValid && lastStepValid) {
+                const size_t prev = previousPosition();
+                const int32_t xdiff2 = x - mX[prev];
+                const int32_t ydiff2 = y - mY[prev];
+
+                // if both current step and previous step are valid (non-negative and
+                // less than INT32_MAX for precision greater than 4 bytes)
+                // then the sum of the two steps is valid when the
+                // int32_t difference is non-negative.
+                if (xdiff2 >= 0 && ydiff2 >= 0
+                        && (int64_t)xdiff2 * ydiff == (int64_t)ydiff2 * xdiff) {
+                    // ALOGD("reusing sample! (%u, %u) sample depth %zd", x, y, mSamples);
+                    mX[mPos] = x;
+                    mY[mPos] = y;
+                    return;
+                }
+            }
+        }
+        if (++mPos >= mSize) {
+            mPos = 0;
+        }
+        if (mSamples < mSize) {
+            mExtrapolateTail = false;
+            ++mSamples;
+        } else {
+            // we enable extrapolation beyond the oldest sample
+            // if the sample buffers are completely full and we
+            // no longer know the full history.
+            mExtrapolateTail = true;
+        }
+        mX[mPos] = x;
+        mY[mPos] = y;
+    }
+
+    // clear all samples from the circular array
+    void reset() {
+        // no need to reset mPos, we use a circular buffer.
+        // computed values such as mStepValid are set after a subsequent push().
+        mSamples = 0;
+    }
+
+    // returns true if LinearMap contains at least one sample.
+    bool hasData() const {
+        return mSamples != 0;
+    }
+
+    // find the corresponding X point from a Y point.
+    // See findU for details.
+    __attribute__((no_sanitize("integer")))
+    T findX(T y, FindMethod *method = NULL, double extrapolation = 0.0, T startValue = 0) const {
+        return findU(y, mX, mY, method, extrapolation, startValue);
+    }
+
+    // find the corresponding Y point from a X point.
+    // See findU for details.
+    __attribute__((no_sanitize("integer")))
+    T findY(T x, FindMethod *method = NULL, double extrapolation = 0.0, T startValue = 0) const {
+        return findU(x, mY, mX, method, extrapolation, startValue);
+    }
+
+protected:
+
+    // returns false if the diff is out of int32_t bounds or negative.
+    __attribute__((no_sanitize("integer")))
+    static inline bool checkedDiff(int32_t *diff, T x2, T x1, const char *coord) {
+        if (sizeof(T) >= 8) {
+            const int64_t diff64 = x2 - x1;
+            *diff = (int32_t)diff64;  // intentionally lose precision
+            if (diff64 > INT32_MAX) {
+                ALOGW("LinearMap: %s overflow diff(%lld) from %llu - %llu exceeds INT32_MAX",
+                        coord, (long long)diff64,
+                        (unsigned long long)x2, (unsigned long long)x1);
+                return false;
+            } else if (diff64 < 0) {
+                ALOGW("LinearMap: %s negative diff(%lld) from %llu - %llu",
+                        coord, (long long)diff64,
+                        (unsigned long long)x2, (unsigned long long)x1);
+                return false;
+            }
+            return true;
+        }
+        // for 32 bit integers we cannot detect overflow (it
+        // shows up as a negative difference).
+        *diff = x2 - x1;
+        if (*diff < 0) {
+            ALOGW("LinearMap: %s negative diff(%d) from %u - %u",
+                    coord, *diff, (unsigned)x2, (unsigned)x1);
+            return false;
+        }
+        return true;
+    }
+
+    // Returns the previous position in the mSamples array
+    // going backwards back steps.
+    //
+    // Parameters:
+    //   back: number of backward steps, cannot be less than zero or greater than mSamples.
+    //
+    __attribute__((no_sanitize("integer")))
+    size_t previousPosition(ssize_t back = 1) const {
+        LOG_ALWAYS_FATAL_IF(back < 0 || (size_t)back > mSamples, "Invalid back(%zd)", back);
+        ssize_t position = mPos - back;
+        if (position < 0) position += mSize;
+        return (size_t)position;
+    }
+
+    // A generic implementation of finding the "other coordinate" with coordinates
+    // (u, v) = (x, y) or (u, v) = (y, x).
+    //
+    // Parameters:
+    //   uArray: the u axis samples.
+    //   vArray: the v axis samples.
+    //   method: [out] how the returned value was computed.
+    //   extrapolation: the slope used when extrapolating from the
+    //     first sample value or the last sample value in the history.
+    //     If mExtrapolateTail is set, the slope of the last line segment
+    //     is used if the extrapolation parameter is zero to continue the tail of history.
+    //     At this time, we do not use a different value for forward extrapolation from the
+    //     head of history from backward extrapolation from the tail of history.
+    //     TODO: back extrapolation value could be stored along with mX, mY in history.
+    //   startValue: used only when there are no samples in history. One can detect
+    //     whether there are samples in history by the method hasData().
+    //
+    __attribute__((no_sanitize("integer")))
+    T findU(T v, T *uArray, T *vArray, FindMethod *method,
+            double extrapolation, T startValue) const {
+        if (mSamples == 0) {
+            if (method != NULL) {
+                *method = FIND_METHOD_START_VALUE;
+            }
+            return startValue;  // nothing yet
+        }
+        ssize_t previous = 0;
+        int32_t diff = 0;
+        for (ssize_t i = 0; i < (ssize_t)mSamples; ++i) {
+            size_t current = previousPosition(i);
+
+            // Assumption: even though the type "T" may have precision greater
+            // than 32 bits, the difference between adjacent points is limited to 32 bits.
+            diff = v - vArray[current];
+            if (diff >= 0 ||
+                    (i == (ssize_t)mSamples - 1 && mExtrapolateTail && extrapolation == 0.0)) {
+                // ALOGD("depth = %zd out of %zd", i, limit);
+                if (i == 0) {
+                    if (method != NULL) {
+                        *method = FIND_METHOD_FORWARD_EXTRAPOLATION;
+                    }
+                    return uArray[current] + diff * extrapolation;
+                }
+                // Interpolate / extrapolate: for this computation we must use
+                // differentials, otherwise we get inconsistent values on modulo
+                // wrap. 'previous' is always valid here since i > 0.  We also
+                // perform rounding, with the assumption that uStep, vStep, and
+                // diff are non-negative.
+                int32_t uStep = uArray[previous] - uArray[current]; // non-negative
+                int32_t vStep = vArray[previous] - vArray[current]; // positive
+                T u = uStep <= 0 || vStep <= 0 ?  // we do not permit non-positive uStep or vStep
+                        uArray[current]
+                      : ((int64_t)diff * uStep + (vStep >> 1)) / vStep + uArray[current];
+                // ALOGD("u:%u  diff:%d  uStep:%d  vStep:%d  u_current:%d",
+                //         u, diff, uStep, vStep, uArray[current]);
+                if (method != NULL) {
+                    *method = (diff >= 0) ?
+                            FIND_METHOD_INTERPOLATION : FIND_METHOD_BACKWARD_EXTRAPOLATION;
+                }
+                return u;
+            }
+            previous = current;
+        }
+        // previous is always valid here.
+        if (method != NULL) {
+            *method = FIND_METHOD_BACKWARD_EXTRAPOLATION;
+        }
+        return uArray[previous] + diff * extrapolation;
+    }
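A worked instance of the rounded interpolation step above, with sample values assumed purely for illustration: the (vStep >> 1) term makes the integer division round to nearest instead of truncating.

    // diff = 5, uStep = 10, vStep = 20, uArray[current] = 100:
    //   ((int64_t)5 * 10 + (20 >> 1)) / 20 + 100 == (50 + 10) / 20 + 100 == 103
    // Without the (vStep >> 1) bias the result would truncate to 102.
    int32_t diff = 5, uStep = 10, vStep = 20;
    int64_t u = ((int64_t)diff * uStep + (vStep >> 1)) / vStep + 100;  // == 103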
+
+private:
+    const size_t    mSize;      // Size of mX and mY arrays (history).
+    size_t          mPos;       // Index in mX and mY of last pushed data;
+                                // (incremented after push) [0, mSize - 1].
+    size_t          mSamples;   // Number of valid samples in the array [0, mSize].
+    bool            mStepValid; // Last sample step was valid (non-negative)
+    bool            mExtrapolateTail; // extrapolate tail using oldest line segment
+    T * const       mX;         // History of X values as a circular array.
+    T * const       mY;         // History of Y values as a circular array.
+};
+
+} // namespace android
+
+#endif // ANDROID_LINEAR_MAP_H
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index f6078a2..f8671b5 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -139,20 +139,21 @@
 status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
                                    audio_patch_handle_t *handle)
 {
-    ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
-          patch->num_sources, patch->num_sinks, *handle);
     status_t status = NO_ERROR;
     audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
     sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
+    if (handle == NULL || patch == NULL) {
+        return BAD_VALUE;
+    }
+    ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
+          patch->num_sources, patch->num_sinks, *handle);
     if (audioflinger == 0) {
         return NO_INIT;
     }
 
-    if (handle == NULL || patch == NULL) {
-        return BAD_VALUE;
-    }
     if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
-            patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+            (patch->num_sinks == 0 && patch->num_sources != 2) ||
+            patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
         return BAD_VALUE;
     }
     // limit the number of sources to 1 for now, or 2 for the special cross hw module case.
@@ -167,6 +168,12 @@
                 ALOGV("createAudioPatch() removing patch handle %d", *handle);
                 halHandle = mPatches[index]->mHalHandle;
                 Patch *removedPatch = mPatches[index];
+                if ((removedPatch->mRecordPatchHandle
+                        != AUDIO_PATCH_HANDLE_NONE) ||
+                        (removedPatch->mPlaybackPatchHandle !=
+                                AUDIO_PATCH_HANDLE_NONE)) {
+                    clearPatchConnections(removedPatch);
+                }
                 mPatches.removeAt(index);
                 delete removedPatch;
                 break;
@@ -203,18 +210,18 @@
             }
 
             // manage patches requiring a software bridge
+            // - special patch request with 2 sources (reuse one existing output mix) OR
             // - Device to device AND
             //    - source HW module != destination HW module OR
             //    - audio HAL version < 3.0
-            //    - special patch request with 2 sources (reuse one existing output mix)
-            if ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
-                    ((patch->sinks[0].ext.device.hw_module != srcModule) ||
-                    (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) ||
-                    (patch->num_sources == 2))) {
+            if ((patch->num_sources == 2) ||
+                ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
+                 ((patch->sinks[0].ext.device.hw_module != srcModule) ||
+                  (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0)))) {
                 if (patch->num_sources == 2) {
                     if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
-                            patch->sinks[0].ext.device.hw_module !=
-                                    patch->sources[1].ext.mix.hw_module) {
+                            (patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
+                                    patch->sources[1].ext.mix.hw_module)) {
                         ALOGW("createAudioPatch() invalid source combination");
                         status = INVALID_OPERATION;
                         goto exit;
@@ -247,14 +254,27 @@
                         goto exit;
                     }
                 }
-                uint32_t channelCount = newPatch->mPlaybackThread->channelCount();
                 audio_devices_t device = patch->sources[0].ext.device.type;
                 String8 address = String8(patch->sources[0].ext.device.address);
                 audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-                audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
-                config.sample_rate = newPatch->mPlaybackThread->sampleRate();
-                config.channel_mask = inChannelMask;
-                config.format = newPatch->mPlaybackThread->format();
+                // open the input stream with the source device audio properties if
+                // provided; otherwise default to the peer output stream properties.
+                if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+                    config.sample_rate = patch->sources[0].sample_rate;
+                } else {
+                    config.sample_rate = newPatch->mPlaybackThread->sampleRate();
+                }
+                if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+                    config.channel_mask = patch->sources[0].channel_mask;
+                } else {
+                    config.channel_mask =
+                        audio_channel_in_mask_from_count(newPatch->mPlaybackThread->channelCount());
+                }
+                if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+                    config.format = patch->sources[0].format;
+                } else {
+                    config.format = newPatch->mPlaybackThread->format();
+                }
                 audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
                 newPatch->mRecordThread = audioflinger->openInput_l(srcModule,
                                                                     &input,
@@ -264,7 +284,7 @@
                                                                     AUDIO_SOURCE_MIC,
                                                                     AUDIO_INPUT_FLAG_NONE);
                 ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x",
-                      newPatch->mRecordThread.get(), inChannelMask);
+                      newPatch->mRecordThread.get(), config.channel_mask);
                 if (newPatch->mRecordThread == 0) {
                     status = NO_MEMORY;
                     goto exit;
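The config_mask handling above generalizes to a small pattern: each field of the requested port config overrides the peer default only when its AUDIO_PORT_CONFIG_* bit is set. A hedged helper expressing that (the helper itself is hypothetical, not in the patch):

    // Hypothetical helper: source-provided value when the bit is set,
    // peer default otherwise.
    template <typename V>
    static V pickConfig(unsigned configMask, unsigned bit, V provided, V fallback) {
        return (configMask & bit) ? provided : fallback;
    }
    // e.g.: config.sample_rate = pickConfig(patch->sources[0].config_mask,
    //         AUDIO_PORT_CONFIG_SAMPLE_RATE, patch->sources[0].sample_rate,
    //         newPatch->mPlaybackThread->sampleRate());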
@@ -348,7 +368,7 @@
 exit:
     ALOGV("createAudioPatch() status %d", status);
     if (status == NO_ERROR) {
-        *handle = audioflinger->nextUniqueId();
+        *handle = (audio_patch_handle_t) audioflinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
         newPatch->mHandle = *handle;
         newPatch->mHalHandle = halHandle;
         mPatches.add(newPatch);
@@ -379,12 +399,16 @@
     }
 
     // create patch from playback thread output to sink device
-    patch->mPlaybackThread->getAudioPortConfig(&subPatch.sources[0]);
-    subPatch.sinks[0] = audioPatch->sinks[0];
-    status = createAudioPatch(&subPatch, &patch->mPlaybackPatchHandle);
-    if (status != NO_ERROR) {
+    if (audioPatch->num_sinks != 0) {
+        patch->mPlaybackThread->getAudioPortConfig(&subPatch.sources[0]);
+        subPatch.sinks[0] = audioPatch->sinks[0];
+        status = createAudioPatch(&subPatch, &patch->mPlaybackPatchHandle);
+        if (status != NO_ERROR) {
+            patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+            return status;
+        }
+    } else {
         patch->mPlaybackPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-        return status;
     }
 
     // use a pseudo LCM between input and output framecount
@@ -396,7 +420,7 @@
         shift = playbackShift;
     }
     size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
-    ALOGV("createPatchConnections() playframeCount %d recordFramecount %d frameCount %d ",
+    ALOGV("createPatchConnections() playframeCount %zu recordFramecount %zu frameCount %zu",
           playbackFrameCount, recordFramecount, frameCount);
 
     // create a special record track to capture from record thread
@@ -448,7 +472,7 @@
     patch->mPatchTrack->setPeerProxy(patch->mPatchRecord.get());
 
     // start capture and playback
-    patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, 0);
+    patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
     patch->mPatchTrack->start();
 
     return status;
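The "pseudo LCM" frame count used above deserves a concrete example (values assumed for illustration). With playbackFrameCount = 1024 = 2^10 and recordFramecount = 960 = 2^6 * 15, the smaller power-of-two exponent is 6, so:

    // (1024 * 960) >> 6 == 15360, which in this case is the exact
    // LCM(1024, 960); shifting out the shared power of two keeps the
    // product from growing far beyond the true least common multiple.
    size_t frameCount = (1024u * 960u) >> 6;   // == 15360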
@@ -609,7 +633,6 @@
 status_t AudioFlinger::PatchPanel::setAudioPortConfig(const struct audio_port_config *config)
 {
     ALOGV("setAudioPortConfig");
-    status_t status = NO_ERROR;
 
     sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
     if (audioflinger == 0) {
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 7bc6f0c..270e27f 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -31,7 +31,7 @@
                                 size_t frameCount,
                                 void *buffer,
                                 const sp<IMemory>& sharedBuffer,
-                                int sessionId,
+                                audio_session_t sessionId,
                                 int uid,
                                 IAudioFlinger::track_flags_t flags,
                                 track_type type);
@@ -42,7 +42,7 @@
             void        dump(char* buffer, size_t size, bool active);
     virtual status_t    start(AudioSystem::sync_event_t event =
                                     AudioSystem::SYNC_EVENT_NONE,
-                             int triggerSession = 0);
+                             audio_session_t triggerSession = AUDIO_SESSION_NONE);
     virtual void        stop();
             void        pause();
 
@@ -83,13 +83,13 @@
                         Track& operator = (const Track&);
 
     // AudioBufferProvider interface
-    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
-                                   int64_t pts = kInvalidPTS);
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
     // releaseBuffer() not overridden
 
     // ExtendedAudioBufferProvider interface
     virtual size_t framesReady() const;
-    virtual size_t framesReleased() const;
+    virtual int64_t framesReleased() const;
+    virtual void onTimestamp(const ExtendedTimestamp &timestamp);
 
     bool isPausing() const { return mState == PAUSING; }
     bool isPaused() const { return mState == PAUSED; }
@@ -101,17 +101,22 @@
     void flushAck();
     bool isResumePending();
     void resumeAck();
+    void updateTrackFrameInfo(int64_t trackFramesReleased, int64_t sinkFramesWritten,
+            const ExtendedTimestamp &timeStamp);
 
     sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
 
     // framesWritten is cumulative, never reset, and is shared by all tracks
     // audioHalFrames is derived from output latency
     // FIXME parameters not needed, could get them from the thread
-    bool presentationComplete(size_t framesWritten, size_t audioHalFrames);
+    bool presentationComplete(int64_t framesWritten, size_t audioHalFrames);
+    void signalClientFlag(int32_t flag);
 
 public:
     void triggerEvents(AudioSystem::sync_event_t type);
     void invalidate();
+    void disable();
+
     bool isInvalid() const { return mIsInvalid; }
     int fastIndex() const { return mFastIndex; }
 
@@ -138,6 +143,12 @@
     size_t              mPresentationCompleteFrames; // number of frames written to the
                                     // audio HAL when this track will be fully rendered
                                     // zero means not monitoring
+
+    // access these variables only when holding the thread lock.
+    LinearMap<int64_t> mFrameMap;           // track frame to server frame mapping
+
+    ExtendedTimestamp  mSinkTimestamp;
+
 private:
     // The following fields are only for fast tracks, and should be in a subclass
     int                 mFastIndex; // index within FastMixerState::mFastTracks[];
@@ -158,92 +169,6 @@
 
 };  // end of Track
 
-class TimedTrack : public Track {
-  public:
-    static sp<TimedTrack> create(PlaybackThread *thread,
-                                 const sp<Client>& client,
-                                 audio_stream_type_t streamType,
-                                 uint32_t sampleRate,
-                                 audio_format_t format,
-                                 audio_channel_mask_t channelMask,
-                                 size_t frameCount,
-                                 const sp<IMemory>& sharedBuffer,
-                                 int sessionId,
-                                 int uid);
-    virtual ~TimedTrack();
-
-    class TimedBuffer {
-      public:
-        TimedBuffer();
-        TimedBuffer(const sp<IMemory>& buffer, int64_t pts);
-        const sp<IMemory>& buffer() const { return mBuffer; }
-        int64_t pts() const { return mPTS; }
-        uint32_t position() const { return mPosition; }
-        void setPosition(uint32_t pos) { mPosition = pos; }
-      private:
-        sp<IMemory> mBuffer;
-        int64_t     mPTS;
-        uint32_t    mPosition;
-    };
-
-    // Mixer facing methods.
-    virtual size_t framesReady() const;
-
-    // AudioBufferProvider interface
-    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
-                                   int64_t pts);
-    virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
-
-    // Client/App facing methods.
-    status_t    allocateTimedBuffer(size_t size,
-                                    sp<IMemory>* buffer);
-    status_t    queueTimedBuffer(const sp<IMemory>& buffer,
-                                 int64_t pts);
-    status_t    setMediaTimeTransform(const LinearTransform& xform,
-                                      TimedAudioTrack::TargetTimeline target);
-
-  private:
-    TimedTrack(PlaybackThread *thread,
-               const sp<Client>& client,
-               audio_stream_type_t streamType,
-               uint32_t sampleRate,
-               audio_format_t format,
-               audio_channel_mask_t channelMask,
-               size_t frameCount,
-               const sp<IMemory>& sharedBuffer,
-               int sessionId,
-               int uid);
-
-    void timedYieldSamples_l(AudioBufferProvider::Buffer* buffer);
-    void timedYieldSilence_l(uint32_t numFrames,
-                             AudioBufferProvider::Buffer* buffer);
-    void trimTimedBufferQueue_l();
-    void trimTimedBufferQueueHead_l(const char* logTag);
-    void updateFramesPendingAfterTrim_l(const TimedBuffer& buf,
-                                        const char* logTag);
-
-    uint64_t            mLocalTimeFreq;
-    LinearTransform     mLocalTimeToSampleTransform;
-    LinearTransform     mMediaTimeToSampleTransform;
-    sp<MemoryDealer>    mTimedMemoryDealer;
-
-    Vector<TimedBuffer> mTimedBufferQueue;
-    bool                mQueueHeadInFlight;
-    bool                mTrimQueueHeadOnRelease;
-    uint32_t            mFramesPendingInQueue;
-
-    uint8_t*            mTimedSilenceBuffer;
-    uint32_t            mTimedSilenceBufferSize;
-    mutable Mutex       mTimedBufferQueueLock;
-    bool                mTimedAudioOutputOnTime;
-    CCHelper            mCCHelper;
-
-    Mutex               mMediaTimeTransformLock;
-    LinearTransform     mMediaTimeTransform;
-    bool                mMediaTimeTransformValid;
-    TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget;
-};
-
 
 // playback track, used by DuplicatingThread
 class OutputTrack : public Track {
@@ -265,7 +190,7 @@
 
     virtual status_t    start(AudioSystem::sync_event_t event =
                                     AudioSystem::SYNC_EVENT_NONE,
-                             int triggerSession = 0);
+                             audio_session_t triggerSession = AUDIO_SESSION_NONE);
     virtual void        stop();
             bool        write(void* data, uint32_t frames);
             bool        bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
@@ -278,6 +203,8 @@
                                      uint32_t waitTimeMs);
     void                clearBufferQueue();
 
+    void                restartIfDisabled();
+
     // Maximum number of pending buffers allocated by OutputTrack::write()
     static const uint8_t kMaxOverFlowBuffers = 10;
 
@@ -302,9 +229,12 @@
                                    IAudioFlinger::track_flags_t flags);
     virtual             ~PatchTrack();
 
+    virtual status_t    start(AudioSystem::sync_event_t event =
+                                    AudioSystem::SYNC_EVENT_NONE,
+                             audio_session_t triggerSession = AUDIO_SESSION_NONE);
+
     // AudioBufferProvider interface
-    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
-                                   int64_t pts);
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
     virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
 
     // PatchProxyBufferProvider interface
@@ -315,6 +245,8 @@
             void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
 
 private:
+            void restartIfDisabled();
+
     sp<ClientProxy>             mProxy;
     PatchProxyBufferProvider*   mPeerProxy;
     struct timespec             mPeerTimeout;
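The new mFrameMap member connects back to the LinearMap header earlier in this change: updateTrackFrameInfo() records corresponding track-frame / sink-frame observations so that a sink-side timestamp position can be mapped back to a track-frame position. A self-contained sketch of the mapping idea only (this is not LinearMap's API; the two observation pairs are invented):

    // Two observed (trackFrames, sinkFrames) pairs; interpolate between them.
    #include <cstdint>

    int64_t trackForSink(int64_t sink) {
        const int64_t t1 = 0,   s1 = 100;      // assumed observation 1
        const int64_t t2 = 480, s2 = 580;      // assumed observation 2
        return t1 + (sink - s1) * (t2 - t1) / (s2 - s1);   // e.g. 340 -> 240
    }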
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 25d6d95..13396a6 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -29,14 +29,14 @@
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
                                 void *buffer,
-                                int sessionId,
+                                audio_session_t sessionId,
                                 int uid,
                                 IAudioFlinger::track_flags_t flags,
                                 track_type type);
     virtual             ~RecordTrack();
     virtual status_t    initCheck() const;
 
-    virtual status_t    start(AudioSystem::sync_event_t event, int triggerSession);
+    virtual status_t    start(AudioSystem::sync_event_t event, audio_session_t triggerSession);
     virtual void        stop();
 
             void        destroy();
@@ -54,6 +54,10 @@
             void        handleSyncStartEvent(const sp<SyncEvent>& event);
             void        clearSyncStartEvent();
 
+            void        updateTrackFrameInfo(int64_t trackFramesReleased,
+                                             int64_t sourceFramesRead,
+                                             uint32_t halSampleRate,
+                                             const ExtendedTimestamp &timestamp);
 private:
     friend class AudioFlinger;  // for mState
 
@@ -61,8 +65,7 @@
                         RecordTrack& operator = (const RecordTrack&);
 
     // AudioBufferProvider interface
-    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
-                                   int64_t pts = kInvalidPTS);
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
     // releaseBuffer() not overridden
 
     bool                mOverflow;  // overflow on most recent attempt to fill client buffer
@@ -99,8 +102,7 @@
     virtual             ~PatchRecord();
 
     // AudioBufferProvider interface
-    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
-                                   int64_t pts);
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
     virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
 
     // PatchProxyBufferProvider interface
diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp
index 2e68dad..3c73543 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/services/audioflinger/ServiceUtilities.cpp
@@ -32,29 +32,37 @@
 
 // Not valid until initialized by AudioFlinger constructor.  It would have to be
 // re-initialized if the process containing AudioFlinger service forks (which it doesn't).
+// This is often used to validate binder interface calls within audioserver
+// (e.g. AudioPolicyManager to AudioFlinger).
 pid_t getpid_cached;
 
-bool recordingAllowed(const String16& opPackageName) {
-    // Note: We are getting the UID from the calling IPC thread state because all
-    // clients that perform recording create AudioRecord in their own processes
-    // and the system does not create AudioRecord objects on behalf of apps. This
-    // differs from playback where in some situations the system recreates AudioTrack
-    // instances associated with a client's MediaPlayer on behalf of this client.
-    // In the latter case we have to store the client UID and pass in along for
-    // security checks.
+// A trusted calling UID may specify the client UID as part of a binder interface call.
+// Otherwise, the calling UID must be equal to the client UID.
+bool isTrustedCallingUid(uid_t uid) {
+    switch (uid) {
+    case AID_MEDIA:
+    case AID_AUDIOSERVER:
+        return true;
+    default:
+        return false;
+    }
+}
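Per the comment above, the intended call-site pattern is: accept a caller-supplied client identity only from trusted UIDs, and pin untrusted callers to their own identity before running the permission check. A hedged sketch (control flow illustrative; the binder accessors are the ones already used in this file):

    // Illustrative only: pin pid/uid to the caller unless the caller is trusted.
    uid_t clientUid = requestedUid;    // identity supplied over binder
    pid_t clientPid = requestedPid;
    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
    if (!isTrustedCallingUid(callingUid)) {
        clientUid = callingUid;        // ignore the supplied identity
        clientPid = IPCThreadState::self()->getCallingPid();
    }
    if (!recordingAllowed(opPackageName, clientPid, clientUid)) {
        return PERMISSION_DENIED;      // status_t error code
    }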
 
+bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid) {
+    // a call from within the audioserver process itself is always allowed.
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
+
     static const String16 sRecordAudio("android.permission.RECORD_AUDIO");
 
+    // We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
+    // may open a record track on behalf of a client.  Note that pid may be a tid.
     // IMPORTANT: Don't use PermissionCache - RECORD_AUDIO is a runtime permission and may change.
-    const bool ok = checkCallingPermission(sRecordAudio);
+    const bool ok = checkPermission(sRecordAudio, pid, uid);
     if (!ok) {
         ALOGE("Request requires android.permission.RECORD_AUDIO");
         return false;
     }
 
-    const uid_t uid = IPCThreadState::self()->getCallingUid();
-
     // To permit command-line native tests
     if (uid == AID_ROOT) return true;
 
@@ -97,11 +105,10 @@
     return true;
 }
 
-bool captureAudioOutputAllowed() {
+bool captureAudioOutputAllowed(pid_t pid, uid_t uid) {
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
     static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
-    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
-    bool ok = PermissionCache::checkCallingPermission(sCaptureAudioOutput);
+    bool ok = checkPermission(sCaptureAudioOutput, pid, uid);
     if (!ok) ALOGE("Request requires android.permission.CAPTURE_AUDIO_OUTPUT");
     return ok;
 }
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
index fba6dce..8b1bc00 100644
--- a/services/audioflinger/ServiceUtilities.h
+++ b/services/audioflinger/ServiceUtilities.h
@@ -19,9 +19,9 @@
 namespace android {
 
 extern pid_t getpid_cached;
-
-bool recordingAllowed(const String16& opPackageName);
-bool captureAudioOutputAllowed();
+bool isTrustedCallingUid(uid_t uid);
+bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
+bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
 bool captureHotwordAllowed();
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 7d2d550..3759424 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -36,6 +36,7 @@
 #include <hardware/audio.h>
 #include <audio_effects/effect_ns.h>
 #include <audio_effects/effect_aec.h>
+#include <audio_utils/conversion.h>
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
 #include <audio_utils/minifloat.h>
@@ -48,12 +49,10 @@
 #include <media/nbaio/Pipe.h>
 #include <media/nbaio/PipeReader.h>
 #include <media/nbaio/SourceAudioBufferProvider.h>
+#include <mediautils/BatteryNotifier.h>
 
 #include <powermanager/PowerManager.h>
 
-#include <common_time/cc_helper.h>
-#include <common_time/local_clock.h>
-
 #include "AudioFlinger.h"
 #include "AudioMixer.h"
 #include "BufferProviders.h"
@@ -72,6 +71,8 @@
 #include <cpustats/ThreadCpuUsage.h>
 #endif
 
+#include "AutoPark.h"
+
 // ----------------------------------------------------------------------------
 
 // Note: the following macro is used for extremely verbose logging message.  In
@@ -110,6 +111,8 @@
 // be released as quickly as possible.
 static const int8_t kMaxTrackRetriesDirect = 2;
 
+
+
 // don't warn about blocked writes or record buffer overflows more often than this
 static const nsecs_t kWarningThrottleNs = seconds(5);
 
@@ -137,6 +140,10 @@
 // Offloaded output thread standby delay: allows track transition without going to standby
 static const nsecs_t kOffloadStandbyDelayNs = seconds(1);
 
+// Direct output thread minimum sleep time in idle or active (underrun) state
+static const nsecs_t kDirectMinSleepTimeUs = 10000;
+
+
 // Whether to use fast mixer
 static const enum {
     FastMixer_Never,    // never initialize or use: for debugging only
@@ -166,13 +173,9 @@
 static const int kPriorityFastMixer = 3;
 static const int kPriorityFastCapture = 3;
 
-// IAudioFlinger::createTrack() reports back to client the total size of shared memory area
-// for the track.  The client then sub-divides this into smaller buffers for its use.
-// Currently the client uses N-buffering by default, but doesn't tell us about the value of N.
-// So for now we just assume that client is double-buffered for fast tracks.
-// FIXME It would be better for client to tell AudioFlinger the value of N,
-// so AudioFlinger could allocate the right amount of memory.
-// See the client's minBufCount and mNotificationFramesAct calculations for details.
+// IAudioFlinger::createTrack() has an in/out parameter 'pFrameCount' for the total size of the
+// track buffer in shared memory.  Zero on input means to use a default value.  For fast tracks,
+// AudioFlinger derives the default from HAL buffer size and 'fast track multiplier'.
 
 // This is the default value, if not specified by property.
 static const int kFastTrackMultiplier = 2;
@@ -221,6 +224,94 @@
 }
 #endif
 
+// Track the CLOCK_BOOTTIME versus CLOCK_MONOTONIC timebase offset
+struct {
+    // call when you acquire a partial wakelock
+    void acquire(const sp<IBinder> &wakeLockToken) {
+        pthread_mutex_lock(&mLock);
+        if (wakeLockToken.get() == nullptr) {
+            adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
+        } else {
+            if (mCount == 0) {
+                adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
+            }
+            ++mCount;
+        }
+        pthread_mutex_unlock(&mLock);
+    }
+
+    // call when you release a partial wakelock.
+    void release(const sp<IBinder> &wakeLockToken) {
+        if (wakeLockToken.get() == nullptr) {
+            return;
+        }
+        pthread_mutex_lock(&mLock);
+        if (--mCount < 0) {
+            ALOGE("negative wakelock count");
+            mCount = 0;
+        }
+        pthread_mutex_unlock(&mLock);
+    }
+
+    // retrieves the boottime timebase offset from monotonic.
+    int64_t getBoottimeOffset() {
+        pthread_mutex_lock(&mLock);
+        int64_t boottimeOffset = mBoottimeOffset;
+        pthread_mutex_unlock(&mLock);
+        return boottimeOffset;
+    }
+
+    // Adjusts the timebase offset between TIMEBASE_MONOTONIC
+    // and the selected timebase.
+    // Currently only TIMEBASE_BOOTTIME is allowed.
+    //
+    // This only needs to be called upon acquiring the first partial wakelock
+    // after all other partial wakelocks are released.
+    //
+    // We do an empirical measurement of the offset rather than parsing
+    // /proc/timer_list since the latter is not a formal kernel ABI.
+    static void adjustTimebaseOffset(int64_t *offset, ExtendedTimestamp::Timebase timebase) {
+        int clockbase;
+        switch (timebase) {
+        case ExtendedTimestamp::TIMEBASE_BOOTTIME:
+            clockbase = SYSTEM_TIME_BOOTTIME;
+            break;
+        default:
+            LOG_ALWAYS_FATAL("invalid timebase %d", timebase);
+            break;
+        }
+        // try three times to get the clock offset, choose the one
+        // with the minimum gap in measurements.
+        const int tries = 3;
+        nsecs_t bestGap, measured;
+        for (int i = 0; i < tries; ++i) {
+            const nsecs_t tmono = systemTime(SYSTEM_TIME_MONOTONIC);
+            const nsecs_t tbase = systemTime(clockbase);
+            const nsecs_t tmono2 = systemTime(SYSTEM_TIME_MONOTONIC);
+            const nsecs_t gap = tmono2 - tmono;
+            if (i == 0 || gap < bestGap) {
+                bestGap = gap;
+                measured = tbase - ((tmono + tmono2) >> 1);
+            }
+        }
+
+        // to avoid micro-adjusting, we don't change the timebase
+        // unless it is significantly different.
+        //
+        // Assumption: It probably takes more than toleranceNs to
+        // suspend and resume the device.
+        static int64_t toleranceNs = 10000; // 10 us
+        if (llabs(*offset - measured) > toleranceNs) {
+            ALOGV("Adjusting timebase offset old: %lld  new: %lld",
+                    (long long)*offset, (long long)measured);
+            *offset = measured;
+        }
+    }
+
+    pthread_mutex_t mLock;
+    int32_t mCount;
+    int64_t mBoottimeOffset;
+} gBoottime = { PTHREAD_MUTEX_INITIALIZER, 0, 0 }; // static, so use POD initialization
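Given the offset maintained above, moving a MONOTONIC timestamp into the BOOTTIME timebase is a single addition (BOOTTIME keeps advancing across suspend, MONOTONIC does not). A minimal usage sketch, variable names illustrative:

    // Translate a monotonic timestamp into the boottime timebase.
    const int64_t offsetNs    = gBoottime.getBoottimeOffset();
    const int64_t monotonicNs = systemTime(SYSTEM_TIME_MONOTONIC);
    const int64_t boottimeNs  = monotonicNs + offsetNs;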
 
 // ----------------------------------------------------------------------------
 //      CPU Stats
@@ -357,54 +448,56 @@
         audio_devices_t mDevices;
         const char *    mString;
     } mappingsOut[] = {
-        AUDIO_DEVICE_OUT_EARPIECE,          "EARPIECE",
-        AUDIO_DEVICE_OUT_SPEAKER,           "SPEAKER",
-        AUDIO_DEVICE_OUT_WIRED_HEADSET,     "WIRED_HEADSET",
-        AUDIO_DEVICE_OUT_WIRED_HEADPHONE,   "WIRED_HEADPHONE",
-        AUDIO_DEVICE_OUT_BLUETOOTH_SCO,     "BLUETOOTH_SCO",
-        AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,     "BLUETOOTH_SCO_HEADSET",
-        AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT,      "BLUETOOTH_SCO_CARKIT",
-        AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,            "BLUETOOTH_A2DP",
-        AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, "BLUETOOTH_A2DP_HEADPHONES",
-        AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER,    "BLUETOOTH_A2DP_SPEAKER",
-        AUDIO_DEVICE_OUT_AUX_DIGITAL,       "AUX_DIGITAL",
-        AUDIO_DEVICE_OUT_HDMI,              "HDMI",
-        AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET",
-        AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET",
-        AUDIO_DEVICE_OUT_USB_ACCESSORY,     "USB_ACCESSORY",
-        AUDIO_DEVICE_OUT_USB_DEVICE,        "USB_DEVICE",
-        AUDIO_DEVICE_OUT_TELEPHONY_TX,      "TELEPHONY_TX",
-        AUDIO_DEVICE_OUT_LINE,              "LINE",
-        AUDIO_DEVICE_OUT_HDMI_ARC,          "HDMI_ARC",
-        AUDIO_DEVICE_OUT_SPDIF,             "SPDIF",
-        AUDIO_DEVICE_OUT_FM,                "FM",
-        AUDIO_DEVICE_OUT_AUX_LINE,          "AUX_LINE",
-        AUDIO_DEVICE_OUT_SPEAKER_SAFE,      "SPEAKER_SAFE",
-        AUDIO_DEVICE_OUT_IP,                "IP",
-        AUDIO_DEVICE_NONE,                  "NONE",         // must be last
+        {AUDIO_DEVICE_OUT_EARPIECE,         "EARPIECE"},
+        {AUDIO_DEVICE_OUT_SPEAKER,          "SPEAKER"},
+        {AUDIO_DEVICE_OUT_WIRED_HEADSET,    "WIRED_HEADSET"},
+        {AUDIO_DEVICE_OUT_WIRED_HEADPHONE,  "WIRED_HEADPHONE"},
+        {AUDIO_DEVICE_OUT_BLUETOOTH_SCO,    "BLUETOOTH_SCO"},
+        {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,    "BLUETOOTH_SCO_HEADSET"},
+        {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT,     "BLUETOOTH_SCO_CARKIT"},
+        {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,           "BLUETOOTH_A2DP"},
+        {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,"BLUETOOTH_A2DP_HEADPHONES"},
+        {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER,   "BLUETOOTH_A2DP_SPEAKER"},
+        {AUDIO_DEVICE_OUT_AUX_DIGITAL,      "AUX_DIGITAL"},
+        {AUDIO_DEVICE_OUT_HDMI,             "HDMI"},
+        {AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,"ANLG_DOCK_HEADSET"},
+        {AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,"DGTL_DOCK_HEADSET"},
+        {AUDIO_DEVICE_OUT_USB_ACCESSORY,    "USB_ACCESSORY"},
+        {AUDIO_DEVICE_OUT_USB_DEVICE,       "USB_DEVICE"},
+        {AUDIO_DEVICE_OUT_TELEPHONY_TX,     "TELEPHONY_TX"},
+        {AUDIO_DEVICE_OUT_LINE,             "LINE"},
+        {AUDIO_DEVICE_OUT_HDMI_ARC,         "HDMI_ARC"},
+        {AUDIO_DEVICE_OUT_SPDIF,            "SPDIF"},
+        {AUDIO_DEVICE_OUT_FM,               "FM"},
+        {AUDIO_DEVICE_OUT_AUX_LINE,         "AUX_LINE"},
+        {AUDIO_DEVICE_OUT_SPEAKER_SAFE,     "SPEAKER_SAFE"},
+        {AUDIO_DEVICE_OUT_IP,               "IP"},
+        {AUDIO_DEVICE_OUT_BUS,              "BUS"},
+        {AUDIO_DEVICE_NONE,                 "NONE"},       // must be last
     }, mappingsIn[] = {
-        AUDIO_DEVICE_IN_COMMUNICATION,      "COMMUNICATION",
-        AUDIO_DEVICE_IN_AMBIENT,            "AMBIENT",
-        AUDIO_DEVICE_IN_BUILTIN_MIC,        "BUILTIN_MIC",
-        AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET,  "BLUETOOTH_SCO_HEADSET",
-        AUDIO_DEVICE_IN_WIRED_HEADSET,      "WIRED_HEADSET",
-        AUDIO_DEVICE_IN_AUX_DIGITAL,        "AUX_DIGITAL",
-        AUDIO_DEVICE_IN_VOICE_CALL,         "VOICE_CALL",
-        AUDIO_DEVICE_IN_TELEPHONY_RX,       "TELEPHONY_RX",
-        AUDIO_DEVICE_IN_BACK_MIC,           "BACK_MIC",
-        AUDIO_DEVICE_IN_REMOTE_SUBMIX,      "REMOTE_SUBMIX",
-        AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET,  "ANLG_DOCK_HEADSET",
-        AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET,  "DGTL_DOCK_HEADSET",
-        AUDIO_DEVICE_IN_USB_ACCESSORY,      "USB_ACCESSORY",
-        AUDIO_DEVICE_IN_USB_DEVICE,         "USB_DEVICE",
-        AUDIO_DEVICE_IN_FM_TUNER,           "FM_TUNER",
-        AUDIO_DEVICE_IN_TV_TUNER,           "TV_TUNER",
-        AUDIO_DEVICE_IN_LINE,               "LINE",
-        AUDIO_DEVICE_IN_SPDIF,              "SPDIF",
-        AUDIO_DEVICE_IN_BLUETOOTH_A2DP,     "BLUETOOTH_A2DP",
-        AUDIO_DEVICE_IN_LOOPBACK,           "LOOPBACK",
-        AUDIO_DEVICE_IN_IP,                 "IP",
-        AUDIO_DEVICE_NONE,                  "NONE",         // must be last
+        {AUDIO_DEVICE_IN_COMMUNICATION,     "COMMUNICATION"},
+        {AUDIO_DEVICE_IN_AMBIENT,           "AMBIENT"},
+        {AUDIO_DEVICE_IN_BUILTIN_MIC,       "BUILTIN_MIC"},
+        {AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET"},
+        {AUDIO_DEVICE_IN_WIRED_HEADSET,     "WIRED_HEADSET"},
+        {AUDIO_DEVICE_IN_AUX_DIGITAL,       "AUX_DIGITAL"},
+        {AUDIO_DEVICE_IN_VOICE_CALL,        "VOICE_CALL"},
+        {AUDIO_DEVICE_IN_TELEPHONY_RX,      "TELEPHONY_RX"},
+        {AUDIO_DEVICE_IN_BACK_MIC,          "BACK_MIC"},
+        {AUDIO_DEVICE_IN_REMOTE_SUBMIX,     "REMOTE_SUBMIX"},
+        {AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET"},
+        {AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET"},
+        {AUDIO_DEVICE_IN_USB_ACCESSORY,     "USB_ACCESSORY"},
+        {AUDIO_DEVICE_IN_USB_DEVICE,        "USB_DEVICE"},
+        {AUDIO_DEVICE_IN_FM_TUNER,          "FM_TUNER"},
+        {AUDIO_DEVICE_IN_TV_TUNER,          "TV_TUNER"},
+        {AUDIO_DEVICE_IN_LINE,              "LINE"},
+        {AUDIO_DEVICE_IN_SPDIF,             "SPDIF"},
+        {AUDIO_DEVICE_IN_BLUETOOTH_A2DP,    "BLUETOOTH_A2DP"},
+        {AUDIO_DEVICE_IN_LOOPBACK,          "LOOPBACK"},
+        {AUDIO_DEVICE_IN_IP,                "IP"},
+        {AUDIO_DEVICE_IN_BUS,               "BUS"},
+        {AUDIO_DEVICE_NONE,                 "NONE"},        // must be last
     };
     String8 result;
     audio_devices_t allDevices = AUDIO_DEVICE_NONE;
@@ -442,9 +535,11 @@
         audio_input_flags_t     mFlag;
         const char *            mString;
     } mappings[] = {
-        AUDIO_INPUT_FLAG_FAST,              "FAST",
-        AUDIO_INPUT_FLAG_HW_HOTWORD,        "HW_HOTWORD",
-        AUDIO_INPUT_FLAG_NONE,              "NONE",         // must be last
+        {AUDIO_INPUT_FLAG_FAST,             "FAST"},
+        {AUDIO_INPUT_FLAG_HW_HOTWORD,       "HW_HOTWORD"},
+        {AUDIO_INPUT_FLAG_RAW,              "RAW"},
+        {AUDIO_INPUT_FLAG_SYNC,             "SYNC"},
+        {AUDIO_INPUT_FLAG_NONE,             "NONE"},        // must be last
     };
     String8 result;
     audio_input_flags_t allFlags = AUDIO_INPUT_FLAG_NONE;
@@ -476,14 +571,17 @@
         audio_output_flags_t    mFlag;
         const char *            mString;
     } mappings[] = {
-        AUDIO_OUTPUT_FLAG_DIRECT,           "DIRECT",
-        AUDIO_OUTPUT_FLAG_PRIMARY,          "PRIMARY",
-        AUDIO_OUTPUT_FLAG_FAST,             "FAST",
-        AUDIO_OUTPUT_FLAG_DEEP_BUFFER,      "DEEP_BUFFER",
-        AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD, "COMPRESS_OFFLOAD",
-        AUDIO_OUTPUT_FLAG_NON_BLOCKING,     "NON_BLOCKING",
-        AUDIO_OUTPUT_FLAG_HW_AV_SYNC,       "HW_AV_SYNC",
-        AUDIO_OUTPUT_FLAG_NONE,             "NONE",         // must be last
+        {AUDIO_OUTPUT_FLAG_DIRECT,          "DIRECT"},
+        {AUDIO_OUTPUT_FLAG_PRIMARY,         "PRIMARY"},
+        {AUDIO_OUTPUT_FLAG_FAST,            "FAST"},
+        {AUDIO_OUTPUT_FLAG_DEEP_BUFFER,     "DEEP_BUFFER"},
+        {AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD,"COMPRESS_OFFLOAD"},
+        {AUDIO_OUTPUT_FLAG_NON_BLOCKING,    "NON_BLOCKING"},
+        {AUDIO_OUTPUT_FLAG_HW_AV_SYNC,      "HW_AV_SYNC"},
+        {AUDIO_OUTPUT_FLAG_RAW,             "RAW"},
+        {AUDIO_OUTPUT_FLAG_SYNC,            "SYNC"},
+        {AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO, "IEC958_NONAUDIO"},
+        {AUDIO_OUTPUT_FLAG_NONE,            "NONE"},        // must be last
     };
     String8 result;
     audio_output_flags_t allFlags = AUDIO_OUTPUT_FLAG_NONE;
@@ -521,6 +619,7 @@
     case AUDIO_SOURCE_VOICE_RECOGNITION:    return "voice recognition";
     case AUDIO_SOURCE_VOICE_COMMUNICATION:  return "voice communication";
     case AUDIO_SOURCE_REMOTE_SUBMIX:        return "remote submix";
+    case AUDIO_SOURCE_UNPROCESSED:          return "unprocessed";
     case AUDIO_SOURCE_FM_TUNER:             return "FM tuner";
     case AUDIO_SOURCE_HOTWORD:              return "hotword";
     default:                                return "unknown";
@@ -541,7 +640,8 @@
         mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
         // mName will be set by concrete (non-virtual) subclass
         mDeathRecipient(new PMDeathRecipient(this)),
-        mSystemReady(systemReady)
+        mSystemReady(systemReady),
+        mNotifiedBatteryStart(false)
 {
     memset(&mPatch, 0, sizeof(struct audio_patch));
 }
@@ -596,8 +696,6 @@
 
 status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
 {
-    status_t status;
-
     ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string());
     Mutex::Autolock _l(mLock);
 
@@ -616,7 +714,7 @@
         return status;
     }
     mConfigEvents.add(event);
-    ALOGV("sendConfigEvent_l() num events %d event %d", mConfigEvents.size(), event->mType);
+    ALOGV("sendConfigEvent_l() num events %zu event %d", mConfigEvents.size(), event->mType);
     mWaitWorkCV.signal();
     mLock.unlock();
     {
@@ -662,7 +760,19 @@
 // sendSetParameterConfigEvent_l() must be called with ThreadBase::mLock held
 status_t AudioFlinger::ThreadBase::sendSetParameterConfigEvent_l(const String8& keyValuePair)
 {
-    sp<ConfigEvent> configEvent = (ConfigEvent *)new SetParameterConfigEvent(keyValuePair);
+    sp<ConfigEvent> configEvent;
+    AudioParameter param(keyValuePair);
+    int value;
+    if (param.getInt(String8(AUDIO_PARAMETER_MONO_OUTPUT), value) == NO_ERROR) {
+        setMasterMono_l(value != 0);
+        if (param.size() == 1) {
+            return NO_ERROR; // should be a solo parameter - we don't pass it down
+        }
+        param.remove(String8(AUDIO_PARAMETER_MONO_OUTPUT));
+        configEvent = new SetParameterConfigEvent(param.toString());
+    } else {
+        configEvent = new SetParameterConfigEvent(keyValuePair);
+    }
     return sendConfigEvent_l(configEvent);
 }
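The mono-output handling above is an instance of a general filter pattern: pull one key out of the key/value bundle, act on it locally, and forward the rest only if anything remains. Reduced to its essentials (using the same AudioParameter calls as the hunk above):

    // Reduced sketch of the interception pattern.
    AudioParameter param(keyValuePair);
    int value;
    if (param.getInt(String8(AUDIO_PARAMETER_MONO_OUTPUT), value) == NO_ERROR) {
        setMasterMono_l(value != 0);                         // handle locally
        param.remove(String8(AUDIO_PARAMETER_MONO_OUTPUT));  // strip the key
        if (param.size() == 0) {
            return NO_ERROR;                                 // nothing to forward
        }
    }
    // ...forward param.toString() as a config event, as above.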
 
@@ -696,7 +806,7 @@
     bool configChanged = false;
 
     while (!mConfigEvents.isEmpty()) {
-        ALOGV("processConfigEvents_l() remaining events %d", mConfigEvents.size());
+        ALOGV("processConfigEvents_l() remaining events %zu", mConfigEvents.size());
         sp<ConfigEvent> event = mConfigEvents[0];
         mConfigEvents.removeAt(0);
         switch (event->mType) {
@@ -795,7 +905,7 @@
         }
         const int len = s.length();
         if (len > 2) {
-            char *str = s.lockBuffer(len); // needed?
+            (void) s.lockBuffer(len);      // needed?
             s.unlockBuffer(len - 2);       // remove trailing ", "
         }
         return s;
@@ -828,12 +938,12 @@
     dprintf(fd, "  Sample rate: %u Hz\n", mSampleRate);
     dprintf(fd, "  HAL frame count: %zu\n", mFrameCount);
     dprintf(fd, "  HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
-    dprintf(fd, "  HAL buffer size: %u bytes\n", mBufferSize);
+    dprintf(fd, "  HAL buffer size: %zu bytes\n", mBufferSize);
     dprintf(fd, "  Channel count: %u\n", mChannelCount);
     dprintf(fd, "  Channel mask: 0x%08x (%s)\n", mChannelMask,
             channelMaskToString(mChannelMask, mType != RECORD).string());
-    dprintf(fd, "  Format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
-    dprintf(fd, "  Frame size: %zu bytes\n", mFrameSize);
+    dprintf(fd, "  Processing format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
+    dprintf(fd, "  Processing frame size: %zu bytes\n", mFrameSize);
     dprintf(fd, "  Pending config events:");
     size_t numConfig = mConfigEvents.size();
     if (numConfig) {
@@ -907,14 +1017,14 @@
             status = mPowerManager->acquireWakeLockWithUid(POWERMANAGER_PARTIAL_WAKE_LOCK,
                     binder,
                     getWakeLockTag(),
-                    String16("media"),
+                    String16("audioserver"),
                     uid,
                     true /* FIXME force oneway contrary to .aidl */);
         } else {
             status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
                     binder,
                     getWakeLockTag(),
-                    String16("media"),
+                    String16("audioserver"),
                     true /* FIXME force oneway contrary to .aidl */);
         }
         if (status == NO_ERROR) {
@@ -922,6 +1032,14 @@
         }
         ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
     }
+
+    if (!mNotifiedBatteryStart) {
+        BatteryNotifier::getInstance().noteStartAudio();
+        mNotifiedBatteryStart = true;
+    }
+    gBoottime.acquire(mWakeLockToken);
+    mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
+            gBoottime.getBoottimeOffset();
 }
 
 void AudioFlinger::ThreadBase::releaseWakeLock()
@@ -932,6 +1050,7 @@
 
 void AudioFlinger::ThreadBase::releaseWakeLock_l()
 {
+    gBoottime.release(mWakeLockToken);
     if (mWakeLockToken != 0) {
         ALOGV("releaseWakeLock_l() %s", mThreadName);
         if (mPowerManager != 0) {
@@ -940,6 +1059,11 @@
         }
         mWakeLockToken.clear();
     }
+
+    if (mNotifiedBatteryStart) {
+        BatteryNotifier::getInstance().noteStopAudio();
+        mNotifiedBatteryStart = false;
+    }
 }
 
 void AudioFlinger::ThreadBase::updateWakeLockUids(const SortedVector<int> &uids) {
@@ -963,8 +1087,12 @@
 
 void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<int> &uids) {
     getPowerManager_l();
-    if (mWakeLockToken == NULL) {
-        ALOGE("no wake lock to update!");
+    if (mWakeLockToken == NULL) { // token may be NULL if AudioFlinger::systemReady() not called.
+        if (mSystemReady) {
+            ALOGE("no wake lock to update, but system ready!");
+        } else {
+            ALOGW("no wake lock to update, system not ready yet");
+        }
         return;
     }
     if (mPowerManager != 0) {
@@ -972,7 +1100,7 @@
         status_t status;
         status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
                     true /* FIXME force oneway contrary to .aidl */);
-        ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
+        ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status);
     }
 }
 
@@ -993,14 +1121,14 @@
 }
 
 void AudioFlinger::ThreadBase::setEffectSuspended(
-        const effect_uuid_t *type, bool suspend, int sessionId)
+        const effect_uuid_t *type, bool suspend, audio_session_t sessionId)
 {
     Mutex::Autolock _l(mLock);
     setEffectSuspended_l(type, suspend, sessionId);
 }
 
 void AudioFlinger::ThreadBase::setEffectSuspended_l(
-        const effect_uuid_t *type, bool suspend, int sessionId)
+        const effect_uuid_t *type, bool suspend, audio_session_t sessionId)
 {
     sp<EffectChain> chain = getEffectChain_l(sessionId);
     if (chain != 0) {
@@ -1040,7 +1168,7 @@
 
 void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *type,
                                                          bool suspend,
-                                                         int sessionId)
+                                                         audio_session_t sessionId)
 {
     ssize_t index = mSuspendedSessions.indexOfKey(sessionId);
 
@@ -1101,7 +1229,7 @@
 
 void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
                                                             bool enabled,
-                                                            int sessionId)
+                                                            audio_session_t sessionId)
 {
     Mutex::Autolock _l(mLock);
     checkSuspendOnEffectEnabled_l(effect, enabled, sessionId);
@@ -1109,7 +1237,7 @@
 
 void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
                                                             bool enabled,
-                                                            int sessionId)
+                                                            audio_session_t sessionId)
 {
     if (mType != RECORD) {
         // suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on
@@ -1133,7 +1261,7 @@
         const sp<AudioFlinger::Client>& client,
         const sp<IEffectClient>& effectClient,
         int32_t priority,
-        int sessionId,
+        audio_session_t sessionId,
         effect_descriptor_t *desc,
         int *enabled,
         status_t *status)
@@ -1216,7 +1344,7 @@
         ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
 
         if (effect == 0) {
-            int id = mAudioFlinger->nextUniqueId();
+            audio_unique_id_t id = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
             // Check CPU and memory usage
             lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
             if (lStatus != NO_ERROR) {
@@ -1272,13 +1400,15 @@
     return handle;
 }
 
-sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(int sessionId, int effectId)
+sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(audio_session_t sessionId,
+        int effectId)
 {
     Mutex::Autolock _l(mLock);
     return getEffect_l(sessionId, effectId);
 }
 
-sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(int sessionId, int effectId)
+sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(audio_session_t sessionId,
+        int effectId)
 {
     sp<EffectChain> chain = getEffectChain_l(sessionId);
     return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
@@ -1289,7 +1419,7 @@
 status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
 {
     // check for existing effect chain with the requested audio session
-    int sessionId = effect->sessionId();
+    audio_session_t sessionId = effect->sessionId();
     sp<EffectChain> chain = getEffectChain_l(sessionId);
     bool chainCreated = false;
 
@@ -1366,13 +1496,14 @@
     }
 }
 
-sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(int sessionId)
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(audio_session_t sessionId)
 {
     Mutex::Autolock _l(mLock);
     return getEffectChain_l(sessionId);
 }
 
-sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(int sessionId) const
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(audio_session_t sessionId)
+        const
 {
     size_t size = mEffectChains.size();
     for (size_t i = 0; i < size; i++) {
@@ -1441,10 +1572,11 @@
         mEffectBufferFormat(AUDIO_FORMAT_INVALID),
         mEffectBufferValid(false),
         mSuspended(0), mBytesWritten(0),
+        mFramesWritten(0),
         mActiveTracksGeneration(0),
         // mStreamTypes[] initialized in constructor body
         mOutput(output),
-        mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
+        mLastWriteTime(-1), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
         mMixerStatus(MIXER_IDLE),
         mMixerStatusIgnoringFastTracks(MIXER_IDLE),
         mStandbyDelayNs(AudioFlinger::mStandbyTimeInNsecs),
@@ -1456,10 +1588,8 @@
         mSignalPending(false),
         mScreenState(AudioFlinger::mScreenState),
         // index 0 is reserved for normal mixer's submix
-        mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1),
-        mHwSupportsPause(false), mHwPaused(false), mFlushPending(false),
-        // mLatchD, mLatchQ,
-        mLatchDValid(false), mLatchQValid(false)
+        mFastTrackAvailMask(((1 << FastMixerState::sMaxFastTracks) - 1) & ~1),
+        mHwSupportsPause(false), mHwPaused(false), mFlushPending(false)
 {
     snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
     mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
@@ -1536,10 +1666,10 @@
 
     size_t numtracks = mTracks.size();
     size_t numactive = mActiveTracks.size();
-    dprintf(fd, "  %d Tracks", numtracks);
+    dprintf(fd, "  %zu Tracks", numtracks);
     size_t numactiveseen = 0;
     if (numtracks) {
-        dprintf(fd, " of which %d are active\n", numactive);
+        dprintf(fd, " of which %zu are active\n", numactive);
         Track::appendDumpHeader(result);
         for (size_t i = 0; i < numtracks; ++i) {
             sp<Track> track = mTracks[i];
@@ -1580,7 +1710,8 @@
     dumpBase(fd, args);
 
     dprintf(fd, "  Normal frame count: %zu\n", mNormalFrameCount);
-    dprintf(fd, "  Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
+    dprintf(fd, "  Last write occurred (msecs): %llu\n",
+            (unsigned long long) ns2ms(systemTime() - mLastWriteTime));
     dprintf(fd, "  Total writes: %d\n", mNumWrites);
     dprintf(fd, "  Delayed writes: %d\n", mNumDelayedWrites);
     dprintf(fd, "  Blocked in write: %s\n", mInWrite ? "yes" : "no");
@@ -1621,7 +1752,7 @@
         audio_channel_mask_t channelMask,
         size_t *pFrameCount,
         const sp<IMemory>& sharedBuffer,
-        int sessionId,
+        audio_session_t sessionId,
         IAudioFlinger::track_flags_t *flags,
         pid_t tid,
         int uid,
@@ -1631,27 +1762,9 @@
     sp<Track> track;
     status_t lStatus;
 
-    bool isTimed = (*flags & IAudioFlinger::TRACK_TIMED) != 0;
-
     // client expresses a preference for FAST, but we get the final say
     if (*flags & IAudioFlinger::TRACK_FAST) {
       if (
-            // not timed
-            (!isTimed) &&
-            // either of these use cases:
-            (
-              // use case 1: shared buffer with any frame count
-              (
-                (sharedBuffer != 0)
-              ) ||
-              // use case 2: frame count is default or at least as large as HAL
-              (
-                // we formerly checked for a callback handler (non-0 tid),
-                // but that is no longer required for TRANSFER_OBTAIN mode
-                ((frameCount == 0) ||
-                (frameCount >= mFrameCount))
-              )
-            ) &&
             // PCM data
             audio_is_linear_pcm(format) &&
             // TODO: extract as a data library function that checks that a computationally
@@ -1669,23 +1782,23 @@
             // FIXME test that MixerThread for this fast track has a capable output HAL
             // FIXME add a permission test also?
         ) {
-        // if frameCount not specified, then it defaults to fast mixer (HAL) frame count
-        if (frameCount == 0) {
+        // static tracks can have any nonzero framecount, streaming tracks check against minimum.
+        if (sharedBuffer == 0) {
             // read the fast track multiplier property the first time it is needed
             int ok = pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit);
             if (ok != 0) {
                 ALOGE("%s pthread_once failed: %d", __func__, ok);
             }
-            frameCount = mFrameCount * sFastTrackMultiplier;
+            frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
         }
-        ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+        ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
                 frameCount, mFrameCount);
       } else {
-        ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d "
-                "mFrameCount=%d format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
+        ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
+                "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
                 "sampleRate=%u mSampleRate=%u "
                 "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
-                isTimed, sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
+                sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
                 audio_is_linear_pcm(format),
                 channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
         *flags &= ~IAudioFlinger::TRACK_FAST;
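For context, the hunk above lets static (shared-buffer) fast tracks keep any nonzero frame count, while streaming fast tracks are raised to at least the HAL frame count times a multiplier read once via pthread_once(). A minimal sketch of that one-time property read; the key af.fast_track_multiplier, the bounds, and the default are assumptions of this sketch, not shown in this excerpt:

    #include <pthread.h>
    #include <stdlib.h>
    #include <cutils/properties.h>

    static int sFastTrackMultiplier = 2;  // assumed default
    static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;

    static void sFastTrackMultiplierInit()
    {
        char value[PROPERTY_VALUE_MAX];
        if (property_get("af.fast_track_multiplier", value, NULL) > 0) {  // assumed key
            char *endptr;
            unsigned long ul = strtoul(value, &endptr, 0);
            if (*endptr == '\0' && ul >= 1 && ul <= 16) {  // assumed bounds
                sFastTrackMultiplier = (int) ul;
            }
        }
    }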
@@ -1697,7 +1810,7 @@
     // This is probably too conservative, but legacy application code may depend on it.
     // If you change this calculation, also review the start threshold which is related.
     if (!(*flags & IAudioFlinger::TRACK_FAST)
-            && audio_is_linear_pcm(format) && sharedBuffer == 0) {
+            && audio_has_proportional_frames(format) && sharedBuffer == 0) {
         // this must match AudioTrack.cpp calculateMinFrameCount().
         // TODO: Move to a common library
         uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
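Several hunks in this patch replace audio_is_linear_pcm() with audio_has_proportional_frames() wherever byte counts are converted to frame counts. A hedged sketch of the intended distinction; the IEC61937 case reflects a reading of the system/media audio headers, not anything shown in this excerpt:

    #include <system/audio.h>

    // Proportional: a byte position converts to a frame position by a constant
    // frame size. True for linear PCM, and also for IEC61937, which carries
    // compressed audio inside fixed-size PCM framing.
    static inline bool audio_has_proportional_frames_sketch(audio_format_t format)
    {
        return audio_is_linear_pcm(format) || format == AUDIO_FORMAT_IEC61937;
    }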
@@ -1720,7 +1833,7 @@
     switch (mType) {
 
     case DIRECT:
-        if (audio_is_linear_pcm(format)) {
+        if (audio_is_linear_pcm(format)) { // TODO maybe use audio_has_proportional_frames()?
             if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
                 ALOGE("createTrack_l() Bad parameter: sampleRate %u format %#x, channelMask 0x%08x "
                         "for output %p with format %#x",
@@ -1784,17 +1897,10 @@
             }
         }
 
-        if (!isTimed) {
-            track = new Track(this, client, streamType, sampleRate, format,
-                              channelMask, frameCount, NULL, sharedBuffer,
-                              sessionId, uid, *flags, TrackBase::TYPE_DEFAULT);
-        } else {
-            track = TimedTrack::create(this, client, streamType, sampleRate, format,
-                    channelMask, frameCount, sharedBuffer, sessionId, uid);
-        }
+        track = new Track(this, client, streamType, sampleRate, format,
+                          channelMask, frameCount, NULL, sharedBuffer,
+                          sessionId, uid, *flags, TrackBase::TYPE_DEFAULT);
 
-        // new Track always returns non-NULL,
-        // but TimedTrack::create() is a factory that could fail by returning NULL
         lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
         if (lStatus != NO_ERROR) {
             ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
@@ -1894,8 +2000,6 @@
 {
     status_t status = ALREADY_EXISTS;
 
-    // set retry count for buffer fill
-    track->mRetryCount = kMaxTrackStartupRetries;
     if (mActiveTracks.indexOf(track) < 0) {
         // the track is newly added, make sure it fills up all its
         // buffers before playing. This is to ensure the client will
@@ -1904,14 +2008,14 @@
             TrackBase::track_state state = track->mState;
             mLock.unlock();
             status = AudioSystem::startOutput(mId, track->streamType(),
-                                              (audio_session_t)track->sessionId());
+                                              track->sessionId());
             mLock.lock();
             // abort track was stopped/paused while we released the lock
             if (state != track->mState) {
                 if (status == NO_ERROR) {
                     mLock.unlock();
                     AudioSystem::stopOutput(mId, track->streamType(),
-                                            (audio_session_t)track->sessionId());
+                                            track->sessionId());
                     mLock.lock();
                 }
                 return INVALID_OPERATION;
@@ -1926,7 +2030,20 @@
 #endif
         }
 
-        track->mFillingUpStatus = track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
+        // set retry count for buffer fill
+        if (track->isOffloaded()) {
+            if (track->isStopping_1()) {
+                track->mRetryCount = kMaxTrackStopRetriesOffload;
+            } else {
+                track->mRetryCount = kMaxTrackStartupRetriesOffload;
+            }
+            track->mFillingUpStatus = mStandby ? Track::FS_FILLING : Track::FS_FILLED;
+        } else {
+            track->mRetryCount = kMaxTrackStartupRetries;
+            track->mFillingUpStatus =
+                    track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
+        }
+
         track->mResetDone = false;
         track->mPresentationCompleteFrames = 0;
         mActiveTracks.add(track);
@@ -1971,7 +2088,7 @@
     track->mName = -1;
     if (track->isFastTrack()) {
         int index = track->mFastIndex;
-        ALOG_ASSERT(0 < index && index < (int)FastMixerState::kMaxFastTracks);
+        ALOG_ASSERT(0 < index && index < (int)FastMixerState::sMaxFastTracks);
         ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index)));
         mFastTrackAvailMask |= 1 << index;
         // redundant as track is about to be destroyed, for dumpsys only
@@ -2021,6 +2138,7 @@
         desc->mFormat = mFormat;
         desc->mFrameCount = mNormalFrameCount; // FIXME see
                                              // AudioFlinger::frameCount(audio_io_handle_t)
+        desc->mFrameCountHAL = mFrameCount;
         desc->mLatency = latency_l();
         break;
 
@@ -2116,7 +2234,7 @@
     mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
     mFrameCount = mBufferSize / mFrameSize;
     if (mFrameCount & 15) {
-        ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames",
+        ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
                 mFrameCount);
     }
 
@@ -2202,7 +2320,7 @@
     if (mType == MIXER || mType == DUPLICATING) {
         mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
     }
-    ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount,
+    ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames", mFrameCount,
             mNormalFrameCount);
 
     // Check if we want to throttle the processing to no more than 2x normal rate
@@ -2261,13 +2379,14 @@
     if (initCheck() != NO_ERROR) {
         return INVALID_OPERATION;
     }
-    size_t framesWritten = mBytesWritten / mFrameSize;
+    int64_t framesWritten = mBytesWritten / mFrameSize;
     *halFrames = framesWritten;
 
     if (isSuspended()) {
         // return an estimation of rendered frames when the output is suspended
         size_t latencyFrames = (latency_l() * mSampleRate) / 1000;
-        *dspFrames = framesWritten >= latencyFrames ? framesWritten - latencyFrames : 0;
+        *dspFrames = (uint32_t)
+                (framesWritten >= (int64_t)latencyFrames ? framesWritten - latencyFrames : 0);
         return NO_ERROR;
     } else {
         status_t status;
@@ -2278,7 +2397,7 @@
     }
 }
 
-uint32_t AudioFlinger::PlaybackThread::hasAudioSession(int sessionId) const
+uint32_t AudioFlinger::PlaybackThread::hasAudioSession(audio_session_t sessionId) const
 {
     Mutex::Autolock _l(mLock);
     uint32_t result = 0;
@@ -2297,7 +2416,7 @@
     return result;
 }
 
-uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId)
+uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(audio_session_t sessionId)
 {
     // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
     // it is moved to correct output by audio policy manager when A2DP is connected or disconnected
@@ -2380,14 +2499,14 @@
             const sp<Track>& track = tracksToRemove.itemAt(i);
             if (track->isExternalTrack()) {
                 AudioSystem::stopOutput(mId, track->streamType(),
-                                        (audio_session_t)track->sessionId());
+                                        track->sessionId());
 #ifdef ADD_BATTERY_DATA
                 // to track the speaker usage
                 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
 #endif
                 if (track->isTerminated()) {
                     AudioSystem::releaseOutput(mId, track->streamType(),
-                                               (audio_session_t)track->sessionId());
+                                               track->sessionId());
                 }
             }
         }
@@ -2398,6 +2517,10 @@
 {
     if (!mMasterMute) {
         char value[PROPERTY_VALUE_MAX];
+        if (mOutDevice == AUDIO_DEVICE_OUT_REMOTE_SUBMIX) {
+            ALOGD("ro.audio.silent will be ignored for threads on AUDIO_DEVICE_OUT_REMOTE_SUBMIX");
+            return;
+        }
         if (property_get("ro.audio.silent", value, "0") > 0) {
             char *endptr;
             unsigned long ul = strtoul(value, &endptr, 0);
@@ -2414,8 +2537,6 @@
 // shared by MIXER and DIRECT, overridden by DUPLICATING
 ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
 {
-    // FIXME rewrite to reduce number of system calls
-    mLastWriteTime = systemTime();
     mInWrite = true;
     ssize_t bytesWritten;
     const size_t offset = mCurrentWriteLength - mBytesRemaining;
@@ -2443,16 +2564,6 @@
         } else {
             bytesWritten = framesWritten;
         }
-        mLatchDValid = false;
-        status_t status = mNormalSink->getTimestamp(mLatchD.mTimestamp);
-        if (status == NO_ERROR) {
-            size_t totalFramesWritten = mNormalSink->framesWritten();
-            if (totalFramesWritten >= mLatchD.mTimestamp.mPosition) {
-                mLatchD.mUnpresentedFrames = totalFramesWritten - mLatchD.mTimestamp.mPosition;
-                // mLatchD.mFramesReleased is set immediately before D is clocked into Q
-                mLatchDValid = true;
-            }
-        }
     // otherwise use the HAL / AudioStreamOut directly
     } else {
         // Direct output and offload threads
@@ -2467,6 +2578,7 @@
         // FIXME We should have an implementation of timestamps for direct output threads.
         // They are used e.g for multichannel PCM playback over HDMI.
         bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
+
         if (mUseAsyncWrite &&
                 ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
             // do not wait for async callback in case of error of full write
@@ -2545,30 +2657,37 @@
     }
 }
 
-void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
+bool AudioFlinger::PlaybackThread::invalidateTracks_l(audio_stream_type_t streamType)
 {
-    ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
+    ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %zu",
             this,  streamType, mTracks.size());
-    Mutex::Autolock _l(mLock);
-
+    bool trackMatch = false;
     size_t size = mTracks.size();
     for (size_t i = 0; i < size; i++) {
         sp<Track> t = mTracks[i];
-        if (t->streamType() == streamType) {
+        if (t->streamType() == streamType && t->isExternalTrack()) {
             t->invalidate();
+            trackMatch = true;
         }
     }
+    return trackMatch;
+}
+
+void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
+{
+    Mutex::Autolock _l(mLock);
+    invalidateTracks_l(streamType);
 }
 
 status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
 {
-    int session = chain->sessionId();
+    audio_session_t session = chain->sessionId();
     int16_t* buffer = reinterpret_cast<int16_t*>(mEffectBufferEnabled
             ? mEffectBuffer : mSinkBuffer);
     bool ownsBuffer = false;
 
     ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
-    if (session > 0) {
+    if (session > AUDIO_SESSION_OUTPUT_MIX) {
         // Only one effect chain can be present in direct output thread and it uses
         // the sink buffer as input
         if (mType != DIRECT) {
@@ -2607,15 +2726,18 @@
     chain->setOutBuffer(reinterpret_cast<int16_t*>(mEffectBufferEnabled
             ? mEffectBuffer : mSinkBuffer));
     // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect
-    // chains list in order to be processed last as it contains output stage effects
+    // chains list in order to be processed last as it contains output stage effects.
     // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
     // session AUDIO_SESSION_OUTPUT_STAGE to be processed
-    // after track specific effects and before output stage
+    // after track specific effects and before output stage.
     // It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and
-    // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX
+    // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX.
     // Effect chain for other sessions are inserted at beginning of effect
     // chains list to be processed before output mix effects. Relative order between other
-    // sessions is not important
+    // sessions is not important.
+    static_assert(AUDIO_SESSION_OUTPUT_MIX == 0 &&
+            AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX,
+            "audio_session_t constants misdefined");
     size_t size = mEffectChains.size();
     size_t i = 0;
     for (i = 0; i < size; i++) {
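A toy illustration of the ordering invariant the new static_assert pins down; the concrete value of AUDIO_SESSION_OUTPUT_STAGE is an assumption here:

    #include <algorithm>
    #include <functional>
    #include <vector>

    int main()
    {
        // App sessions (> 0) sort first, AUDIO_SESSION_OUTPUT_MIX (== 0) next,
        // AUDIO_SESSION_OUTPUT_STAGE (< 0) last, matching the required processing
        // order: track effects, then output mix, then output stage.
        std::vector<int> sessions = {42 /* app */, -1 /* OUTPUT_STAGE, assumed */, 0 /* OUTPUT_MIX */};
        std::sort(sessions.begin(), sessions.end(), std::greater<int>());
        // sessions is now {42, 0, -1}
        return 0;
    }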
@@ -2631,7 +2753,7 @@
 
 size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& chain)
 {
-    int session = chain->sessionId();
+    audio_session_t session = chain->sessionId();
 
     ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
 
@@ -2710,6 +2832,8 @@
     Vector< sp<Track> > tracksToRemove;
 
     mStandbyTimeNs = systemTime();
+    nsecs_t lastWriteFinished = -1; // time last server write completed
+    int64_t lastFramesWritten = -1; // detects changes in server frames written, for timestamps
 
     // MIXER
     nsecs_t lastWarning = 0;
@@ -2758,21 +2882,74 @@
             }
 
             // Gather the framesReleased counters for all active tracks,
-            // and latch them atomically with the timestamp.
-            // FIXME We're using raw pointers as indices. A unique track ID would be a better index.
-            mLatchD.mFramesReleased.clear();
-            size_t size = mActiveTracks.size();
-            for (size_t i = 0; i < size; i++) {
-                sp<Track> t = mActiveTracks[i].promote();
-                if (t != 0) {
-                    mLatchD.mFramesReleased.add(t.get(),
-                            t->mAudioTrackServerProxy->framesReleased());
+            // and associate with the sink frames written out.  We need
+            // this to convert the sink timestamp to the track timestamp.
+            bool kernelLocationUpdate = false;
+            if (mNormalSink != 0) {
+                // Note: The DuplicatingThread may not have a mNormalSink.
+                // We always fetch the timestamp here because often the downstream
+                // sink will block while writing.
+                ExtendedTimestamp timestamp; // use private copy to fetch
+                (void) mNormalSink->getTimestamp(timestamp);
+
+                // We keep track of the last valid kernel position in case we are in underrun
+                // and the normal mixer period is the same as the fast mixer period, or there
+                // is some error from the HAL.
+                if (mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
+                    mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
+                            mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+                    mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
+                            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+
+                    mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
+                            mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER];
+                    mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
+                            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER];
                 }
+
+                if (timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
+                    kernelLocationUpdate = true;
+                } else {
+                    ALOGV("getTimestamp error - no valid kernel position");
+                }
+
+                // copy over kernel info
+                mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+                        timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+                        timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
             }
-            if (mLatchDValid) {
-                mLatchQ = mLatchD;
-                mLatchDValid = false;
-                mLatchQValid = true;
+            // mFramesWritten for non-offloaded tracks is contiguous
+            // even after standby() is called. This is useful for the track-frame
+            // to sink-frame mapping.
+            bool serverLocationUpdate = false;
+            if (mFramesWritten != lastFramesWritten) {
+                serverLocationUpdate = true;
+                lastFramesWritten = mFramesWritten;
+            }
+            // Only update timestamps if there is a meaningful change.
+            // Either the kernel timestamp must be valid or we have written something.
+            if (kernelLocationUpdate || serverLocationUpdate) {
+                if (serverLocationUpdate) {
+                    // use the time before we called the HAL write - it is a bit closer
+                    // to when the server last read data than the current time here.
+                    //
+                    // If we haven't written anything, mLastWriteTime will be -1
+                    // and we use systemTime().
+                    mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = mFramesWritten;
+                    mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastWriteTime == -1
+                            ? systemTime() : mLastWriteTime;
+                }
+                const size_t size = mActiveTracks.size();
+                for (size_t i = 0; i < size; ++i) {
+                    sp<Track> t = mActiveTracks[i].promote();
+                    if (t != 0 && !t->isFastTrack()) {
+                        t->updateTrackFrameInfo(
+                                t->mAudioTrackServerProxy->framesReleased(),
+                                mFramesWritten,
+                                mTimestamp);
+                    }
+                }
             }
 
             saveOutputTracks();
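The latch above keeps one position/time pair per pipeline location. A simplified model of the ExtendedTimestamp layout as this hunk uses it; the real struct carries more locations and helper methods:

    #include <stdint.h>

    struct ExtendedTimestampSketch {
        enum Location {
            LOCATION_SERVER,               // frames written by this thread
            LOCATION_KERNEL,               // frames presented by the HAL/kernel
            LOCATION_SERVER_LASTKERNELOK,  // server snapshot at the last valid kernel time
            LOCATION_KERNEL_LASTKERNELOK,  // kernel snapshot at the last valid kernel time
            LOCATION_MAX
        };
        int64_t mPosition[LOCATION_MAX];
        int64_t mTimeNs[LOCATION_MAX];     // -1 means no valid time at that location
    };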
@@ -2784,11 +2961,7 @@
                     break;
                 }
                 bool released = false;
-                // The following works around a bug in the offload driver. Ideally we would release
-                // the wake lock every time, but that causes the last offload buffer(s) to be
-                // dropped while the device is on battery, so we need to hold a wake lock during
-                // the drain phase.
-                if (mBytesRemaining && !(mDrainSequence & 1)) {
+                if (!keepWakeLock()) {
                     releaseWakeLock_l();
                     released = true;
                 }
@@ -2893,6 +3066,13 @@
                 void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
                 audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
 
+                // mono blend occurs for mixer threads only (not direct or offloaded)
+                // and is handled here if we're going directly to the sink.
+                if (requireMonoBlend() && !mEffectBufferValid) {
+                    mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount, mNormalFrameCount,
+                               true /*limit*/);
+                }
+
                 memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
                         mNormalFrameCount * mChannelCount);
             }
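A hedged sketch of what the mono blend does to an interleaved float buffer; the gain the real mono_blend() applies when limit is true is an assumption of this sketch:

    #include <cstddef>

    // Average all channels into every channel; with limit, halve the result so
    // correlated content cannot clip after summing (assumed factor).
    static void mono_blend_sketch(float *buf, size_t channelCount, size_t frames, bool limit)
    {
        const float gain = (limit ? 0.5f : 1.0f) / (float) channelCount;
        for (size_t i = 0; i < frames; ++i) {
            float mono = 0.f;
            for (size_t c = 0; c < channelCount; ++c) {
                mono += buf[i * channelCount + c];
            }
            mono *= gain;
            for (size_t c = 0; c < channelCount; ++c) {
                buf[i * channelCount + c] = mono;
            }
        }
    }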
@@ -2902,6 +3082,7 @@
                 mSleepTimeUs = suspendSleepTimeUs();
                 // simulate write to HAL when suspended
                 mBytesWritten += mSinkBufferSize;
+                mFramesWritten += mSinkBufferSize / mFrameSize;
                 mBytesRemaining = 0;
             }
 
@@ -2928,6 +3109,12 @@
         // TODO use mSleepTimeUs == 0 as an additional condition.
         if (mEffectBufferValid) {
             //ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
+
+            if (requireMonoBlend()) {
+                mono_blend(mEffectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
+                           true /*limit*/);
+            }
+
             memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
                     mNormalFrameCount * mChannelCount);
         }
@@ -2939,13 +3126,23 @@
             // mSleepTimeUs == 0 means we must write to audio hardware
             if (mSleepTimeUs == 0) {
                 ssize_t ret = 0;
+                // We save lastWriteFinished here, as previousLastWriteFinished,
+                // for throttling. On thread start, previousLastWriteFinished will be
+                // set to -1, which properly results in no throttling after the first write.
+                nsecs_t previousLastWriteFinished = lastWriteFinished;
+                nsecs_t delta = 0;
                 if (mBytesRemaining) {
+                    // FIXME rewrite to reduce number of system calls
+                    mLastWriteTime = systemTime();  // also used for dumpsys
                     ret = threadLoop_write();
+                    lastWriteFinished = systemTime();
+                    delta = lastWriteFinished - mLastWriteTime;
                     if (ret < 0) {
                         mBytesRemaining = 0;
                     } else {
                         mBytesWritten += ret;
                         mBytesRemaining -= ret;
+                        mFramesWritten += ret / mFrameSize;
                     }
                 } else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
                         (mMixerStatus == MIXER_DRAIN_ALL)) {
@@ -2953,15 +3150,13 @@
                 }
                 if (mType == MIXER && !mStandby) {
                     // write blocked detection
-                    nsecs_t now = systemTime();
-                    nsecs_t delta = now - mLastWriteTime;
                     if (delta > maxPeriod) {
                         mNumDelayedWrites++;
-                        if ((now - lastWarning) > kWarningThrottleNs) {
+                        if ((lastWriteFinished - lastWarning) > kWarningThrottleNs) {
                             ATRACE_NAME("underrun");
                             ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
-                                    ns2ms(delta), mNumDelayedWrites, this);
-                            lastWarning = now;
+                                    (unsigned long long) ns2ms(delta), mNumDelayedWrites, this);
+                            lastWarning = lastWriteFinished;
                         }
                     }
 
@@ -2981,7 +3176,9 @@
                         // (2) minimum buffer sized tracks (even if the track is full,
                         //     the app won't fill fast enough to handle the sudden draw).
 
-                        const int32_t deltaMs = delta / 1000000;
+                        // it's OK if deltaMs is an overestimate.
+                        const int32_t deltaMs =
+                                (lastWriteFinished - previousLastWriteFinished) / 1000000;
                         const int32_t throttleMs = mHalfBufferMs - deltaMs;
                         if ((signed)mHalfBufferMs >= throttleMs && throttleMs > 0) {
                             usleep(throttleMs * 1000);
@@ -2995,7 +3192,9 @@
                             uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
                             if (diff > 0) {
                                 // notify of throttle end on debug log
-                                ALOGD("mixer(%p) throttle end: throttle time(%u)", this, diff);
+                                // but prevent spamming for bluetooth
+                                ALOGD_IF(!audio_is_a2dp_out_device(outDevice()),
+                                        "mixer(%p) throttle end: throttle time(%u)", this, diff);
                                 mThreadThrottleEndMs = mThreadThrottleTimeMs;
                             }
                         }
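The throttle above keeps the mixer near real time when HAL writes return quickly. A self-contained sketch of the arithmetic, with halfBufferMs standing in for mHalfBufferMs:

    #include <stdint.h>
    #include <unistd.h>

    // Example: a 20 ms sink buffer gives halfBufferMs = 10; if the write returned
    // after 4 ms, sleep 6 ms so production runs no faster than ~2x real time.
    static void throttle_sketch(int64_t writeStartNs, int64_t writeEndNs, int32_t halfBufferMs)
    {
        const int32_t deltaMs = (int32_t) ((writeEndNs - writeStartNs) / 1000000);
        const int32_t throttleMs = halfBufferMs - deltaMs;
        if (throttleMs > 0 && throttleMs <= halfBufferMs) {
            usleep(throttleMs * 1000);
        }
    }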
@@ -3004,7 +3203,10 @@
 
             } else {
                 ATRACE_BEGIN("sleep");
-                usleep(mSleepTimeUs);
+                Mutex::Autolock _l(mLock);
+                if (!mSignalPending && mConfigEvents.isEmpty() && !exitPending()) {
+                    mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)mSleepTimeUs));
+                }
                 ATRACE_END();
             }
         }
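Replacing usleep() with a timed condition wait makes the idle sleep interruptible: a pending signal, config event, or exit request wakes the thread at once. A generic, self-contained illustration, with std:: primitives standing in for Android's Mutex/Condition:

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    std::mutex gLock;
    std::condition_variable gWaitWork;
    bool gSignalPending = false;

    void sleep_unless_signalled(int64_t sleepUs)
    {
        std::unique_lock<std::mutex> lk(gLock);
        // returns early if another thread sets gSignalPending and notifies
        gWaitWork.wait_for(lk, std::chrono::microseconds(sleepUs),
                [] { return gSignalPending; });
    }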
@@ -3071,7 +3273,12 @@
 status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
 {
     if (mNormalSink != 0) {
-        return mNormalSink->getTimestamp(timestamp);
+        ExtendedTimestamp ets;
+        status_t status = mNormalSink->getTimestamp(ets);
+        if (status == NO_ERROR) {
+            status = ets.getBestTimestamp(&timestamp);
+        }
+        return status;
     }
     if ((mType == OFFLOAD || mType == DIRECT)
             && mOutput != NULL && mOutput->stream->get_presentation_position) {
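A hedged sketch of the fallback getTimestamp_l() now relies on: getBestTimestamp() plausibly prefers the kernel location when it carries a valid time and otherwise falls back to the server location. Everything below is illustrative:

    #include <initializer_list>
    #include <stdint.h>

    struct EtsSketch {
        enum { SERVER = 0, KERNEL = 1, MAX = 2 };
        int64_t mPosition[MAX];
        int64_t mTimeNs[MAX];  // -1 when that location has no valid time
    };

    static bool best_timestamp_sketch(const EtsSketch &ets, int64_t *pos, int64_t *timeNs)
    {
        for (int loc : {EtsSketch::KERNEL, EtsSketch::SERVER}) {
            if (ets.mTimeNs[loc] >= 0) {
                *pos = ets.mPosition[loc];
                *timeNs = ets.mTimeNs[loc];
                return true;
            }
        }
        return false;
    }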
@@ -3088,31 +3295,9 @@
 status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
                                                           audio_patch_handle_t *handle)
 {
-    // if !&IDLE, holds the FastMixer state to restore after new parameters processed
-    FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
-    if (mFastMixer != 0) {
-        FastMixerStateQueue *sq = mFastMixer->sq();
-        FastMixerState *state = sq->begin();
-        if (!(state->mCommand & FastMixerState::IDLE)) {
-            previousCommand = state->mCommand;
-            state->mCommand = FastMixerState::HOT_IDLE;
-            sq->end();
-            sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
-        } else {
-            sq->end(false /*didModify*/);
-        }
-    }
-    status_t status = PlaybackThread::createAudioPatch_l(patch, handle);
+    AutoPark<FastMixer> park(mFastMixer);
 
-    if (!(previousCommand & FastMixerState::IDLE)) {
-        ALOG_ASSERT(mFastMixer != 0);
-        FastMixerStateQueue *sq = mFastMixer->sq();
-        FastMixerState *state = sq->begin();
-        ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
-        state->mCommand = previousCommand;
-        sq->end();
-        sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
-    }
+    status_t status = PlaybackThread::createAudioPatch_l(patch, handle);
 
     return status;
 }
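AutoPark replaces the hand-rolled save/restore removed above with an RAII helper: park the FastMixer in HOT_IDLE on construction, restore the previous command on destruction. A sketch assembled from the removed lines, specialized for FastMixer; the real template's exact shape is an assumption:

    class AutoParkFastMixerSketch {
    public:
        explicit AutoParkFastMixerSketch(const sp<FastMixer>& fastMixer)
            : mFastMixer(fastMixer), mPreviousCommand(FastMixerState::HOT_IDLE)
        {
            if (fastMixer != 0) {
                FastMixerStateQueue *sq = fastMixer->sq();
                FastMixerState *state = sq->begin();
                if (!(state->mCommand & FastMixerState::IDLE)) {
                    mPreviousCommand = state->mCommand;
                    state->mCommand = FastMixerState::HOT_IDLE;
                    sq->end();
                    sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
                } else {
                    sq->end(false /*didModify*/);
                }
            }
        }

        ~AutoParkFastMixerSketch()
        {
            if (!(mPreviousCommand & FastMixerState::IDLE)) {
                FastMixerStateQueue *sq = mFastMixer->sq();
                FastMixerState *state = sq->begin();
                state->mCommand = mPreviousCommand;
                sq->end();
                sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
            }
        }

    private:
        const sp<FastMixer> mFastMixer;
        FastMixerState::Command mPreviousCommand;
    };

A scoped park/restore also covers early returns and error paths in createAudioPatch_l() and releaseAudioPatch_l(), which the manual version did not.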
@@ -3195,33 +3380,10 @@
 
 status_t AudioFlinger::MixerThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
 {
-    // if !&IDLE, holds the FastMixer state to restore after new parameters processed
-    FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
-    if (mFastMixer != 0) {
-        FastMixerStateQueue *sq = mFastMixer->sq();
-        FastMixerState *state = sq->begin();
-        if (!(state->mCommand & FastMixerState::IDLE)) {
-            previousCommand = state->mCommand;
-            state->mCommand = FastMixerState::HOT_IDLE;
-            sq->end();
-            sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
-        } else {
-            sq->end(false /*didModify*/);
-        }
-    }
+    AutoPark<FastMixer> park(mFastMixer);
 
     status_t status = PlaybackThread::releaseAudioPatch_l(handle);
 
-    if (!(previousCommand & FastMixerState::IDLE)) {
-        ALOG_ASSERT(mFastMixer != 0);
-        FastMixerStateQueue *sq = mFastMixer->sq();
-        FastMixerState *state = sq->begin();
-        ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
-        state->mCommand = previousCommand;
-        sq->end();
-        sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
-    }
-
     return status;
 }
 
@@ -3270,14 +3432,15 @@
     :   PlaybackThread(audioFlinger, output, id, device, type, systemReady),
         // mAudioMixer below
         // mFastMixer below
-        mFastMixerFutex(0)
+        mFastMixerFutex(0),
+        mMasterMono(false)
         // mOutputSink below
         // mPipeSink below
         // mNormalSink below
 {
     ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
-    ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%u, "
-            "mFrameCount=%d, mNormalFrameCount=%d",
+    ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%zu, "
+            "mFrameCount=%zu, mNormalFrameCount=%zu",
             mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
             mNormalFrameCount);
     mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
@@ -3292,7 +3455,12 @@
     mOutputSink = new AudioStreamOutSink(output->stream);
     size_t numCounterOffers = 0;
     const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
-    ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
+#if !LOG_NDEBUG
+    ssize_t index =
+#else
+    (void)
+#endif
+            mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
     ALOG_ASSERT(index == 0);
 
     // initialize fast mixer depending on configuration
@@ -3327,7 +3495,9 @@
 
         // create a MonoPipe to connect our submix to FastMixer
         NBAIO_Format format = mOutputSink->format();
+#ifdef TEE_SINK
         NBAIO_Format origformat = format;
+#endif
         // adjust format to match that of the Fast Mixer
         ALOGV("format changed from %d to %d", format.mFormat, fastMixerFormat);
         format.mFormat = fastMixerFormat;
@@ -3339,7 +3509,12 @@
         MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
         const NBAIO_Format offers[1] = {format};
         size_t numCounterOffers = 0;
-        ssize_t index = monoPipe->negotiate(offers, 1, NULL, numCounterOffers);
+#if !LOG_NDEBUG || defined(TEE_SINK)
+        ssize_t index =
+#else
+        (void)
+#endif
+                monoPipe->negotiate(offers, 1, NULL, numCounterOffers);
         ALOG_ASSERT(index == 0);
         monoPipe->setAvgFrames((mScreenState & 1) ?
                 (monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
@@ -3592,22 +3767,8 @@
 
 void AudioFlinger::MixerThread::threadLoop_mix()
 {
-    // obtain the presentation timestamp of the next output buffer
-    int64_t pts;
-    status_t status = INVALID_OPERATION;
-
-    if (mNormalSink != 0) {
-        status = mNormalSink->getNextWriteTimestamp(&pts);
-    } else {
-        status = mOutputSink->getNextWriteTimestamp(&pts);
-    }
-
-    if (status != NO_ERROR) {
-        pts = AudioBufferProvider::kInvalidPTS;
-    }
-
     // mix buffers...
-    mAudioMixer->process(pts);
+    mAudioMixer->process();
     mCurrentWriteLength = mSinkBufferSize;
     // increase sleep time progressively when application underrun condition clears.
     // Only increase sleep time if the mixer is ready for two consecutive times to avoid
@@ -3718,7 +3879,7 @@
             // at the identical fast mixer slot within the same normal mix cycle,
             // is impossible because the slot isn't marked available until the end of each cycle.
             int j = track->mFastIndex;
-            ALOG_ASSERT(0 < j && j < (int)FastMixerState::kMaxFastTracks);
+            ALOG_ASSERT(0 < j && j < (int)FastMixerState::sMaxFastTracks);
             ALOG_ASSERT(!(mFastTrackAvailMask & (1 << j)));
             FastTrack *fastTrack = &state->mFastTracks[j];
 
@@ -3740,6 +3901,8 @@
                     recentUnderruns > 0) {
                 // FIXME fast mixer will pull & mix partial buffers, but we count as a full underrun
                 track->mAudioTrackServerProxy->tallyUnderrunFrames(recentUnderruns * mFrameCount);
+            } else {
+                track->mAudioTrackServerProxy->tallyUnderrunFrames(0);
             }
 
             // This is similar to the state machine for normal tracks,
@@ -3782,7 +3945,7 @@
                     }
                     // indicate to client process that the track was disabled because of underrun;
                     // it will then automatically call start() when data is available
-                    android_atomic_or(CBLK_DISABLED, &track->mCblk->mFlags);
+                    track->disable();
                     // remove from active list, but state remains ACTIVE [confusing but true]
                     isActive = false;
                     break;
@@ -3798,7 +3961,7 @@
                 {
                     size_t audioHALFrames =
                             (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
-                    size_t framesWritten = mBytesWritten / mFrameSize;
+                    int64_t framesWritten = mBytesWritten / mFrameSize;
                     if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) {
                         // track stays in active list until presentation is complete
                         break;
@@ -3849,7 +4012,10 @@
                     // because we're about to decrement the last sp<> on those tracks.
                     block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
                 } else {
-                    LOG_ALWAYS_FATAL("fast track %d should have been active", j);
+                    LOG_ALWAYS_FATAL("fast track %d should have been active; "
+                            "mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
+                            j, track->mState, state->mTrackMask, recentUnderruns,
+                            track->sharedBuffer() != 0);
                 }
                 tracksToRemove->add(track);
                 // Avoids a misleading display in dumpsys
@@ -4106,7 +4272,10 @@
                 ALOGV("track(%p) underrun,  framesReady(%zu) < framesDesired(%zd)",
                         track, framesReady, desiredFrames);
                 track->mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
+            } else {
+                track->mAudioTrackServerProxy->tallyUnderrunFrames(0);
             }
+
             // clear effect chain input buffer if an active track underruns to avoid sending
             // previous audio buffer again to effects
             chain = getEffectChain_l(track->sessionId());
@@ -4122,7 +4291,7 @@
                 // TODO: use actual buffer filling status instead of latency when available from
                 // audio HAL
                 size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
-                size_t framesWritten = mBytesWritten / mFrameSize;
+                int64_t framesWritten = mBytesWritten / mFrameSize;
                 if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
                     if (track->isStopped()) {
                         track->reset();
@@ -4137,7 +4306,7 @@
                     tracksToRemove->add(track);
                     // indicate to client process that the track was disabled because of underrun;
                     // it will then automatically call start() when data is available
-                    android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
+                    track->disable();
                 // If one track is not ready, mark the mixer also not ready if:
                 //  - the mixer was ready during previous round OR
                 //  - no other track is ready
@@ -4150,7 +4319,6 @@
         }
 
         }   // local variable scope to avoid goto warning
-track_is_ready: ;
 
     }
 
@@ -4242,7 +4410,7 @@
 
 // getTrackName_l() must be called with ThreadBase::mLock held
 int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask,
-        audio_format_t format, int sessionId)
+        audio_format_t format, audio_session_t sessionId)
 {
     return mAudioMixer->getTrackName(channelMask, format, sessionId);
 }
@@ -4263,20 +4431,7 @@
 
     status = NO_ERROR;
 
-    // if !&IDLE, holds the FastMixer state to restore after new parameters processed
-    FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
-    if (mFastMixer != 0) {
-        FastMixerStateQueue *sq = mFastMixer->sq();
-        FastMixerState *state = sq->begin();
-        if (!(state->mCommand & FastMixerState::IDLE)) {
-            previousCommand = state->mCommand;
-            state->mCommand = FastMixerState::HOT_IDLE;
-            sq->end();
-            sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
-        } else {
-            sq->end(false /*didModify*/);
-        }
-    }
+    AutoPark<FastMixer> park(mFastMixer);
 
     AudioParameter param = AudioParameter(keyValuePair);
     int value;
@@ -4371,33 +4526,24 @@
         }
     }
 
-    if (!(previousCommand & FastMixerState::IDLE)) {
-        ALOG_ASSERT(mFastMixer != 0);
-        FastMixerStateQueue *sq = mFastMixer->sq();
-        FastMixerState *state = sq->begin();
-        ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
-        state->mCommand = previousCommand;
-        sq->end();
-        sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
-    }
-
     return reconfig || a2dpDeviceChanged;
 }
 
 
 void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
     PlaybackThread::dumpInternals(fd, args);
     dprintf(fd, "  Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
     dprintf(fd, "  AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
+    dprintf(fd, "  Master mono: %s\n", mMasterMono ? "on" : "off");
 
     // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
-    const FastMixerDumpState copy(mFastMixerDumpState);
-    copy.dump(fd);
+    // while we are dumping it.  It may be inconsistent, but it won't mutate!
+    // This is a large object so we place it on the heap.
+    // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+    const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
+    copy->dump(fd);
+    delete copy;
 
 #ifdef STATE_QUEUE_DUMP
     // Similar for state queue
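The same snapshot with std::unique_ptr would drop the manual delete; sketched with a stand-in type, since FastMixerDumpState is defined elsewhere:

    #include <memory>

    struct DumpStateStandIn { void dump(int fd) const { (void) fd; } };

    void dump_copy_sketch(int fd, const DumpStateStandIn &live)
    {
        const std::unique_ptr<DumpStateStandIn> copy(new DumpStateStandIn(live));
        copy->dump(fd);
    }  // snapshot released here automatically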
@@ -4465,7 +4611,6 @@
 
 void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTrack)
 {
-    audio_track_cblk_t* cblk = track->cblk();
     float left, right;
 
     if (mMasterMute || mStreamTypes[track->streamType()].mute) {
@@ -4554,7 +4699,9 @@
         }
 
         Track* const track = t.get();
+#ifdef VERY_VERY_VERBOSE_LOGGING
         audio_track_cblk_t* cblk = track->cblk();
+#endif
         // Only consider last track started for volume and mixer state control.
         // In theory an older track could underrun and restart after the new one starts
         // but as we only care about the transition phase between two tracks on a
@@ -4591,7 +4738,7 @@
         // Do not use a high threshold for compressed audio.
         uint32_t minFrames;
         if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()
-            && (track->mRetryCount > 1) && audio_is_linear_pcm(mFormat)) {
+            && (track->mRetryCount > 1) && audio_has_proportional_frames(mFormat)) {
             minFrames = mNormalFrameCount;
         } else {
             minFrames = 1;
@@ -4652,13 +4799,13 @@
                 // We have consumed all the buffers of this track.
                 // Remove it from the list of active tracks.
                 size_t audioHALFrames;
-                if (audio_is_linear_pcm(mFormat)) {
+                if (audio_has_proportional_frames(mFormat)) {
                     audioHALFrames = (latency_l() * mSampleRate) / 1000;
                 } else {
                     audioHALFrames = 0;
                 }
 
-                size_t framesWritten = mBytesWritten / mFrameSize;
+                int64_t framesWritten = mBytesWritten / mFrameSize;
                 if (mStandby || !last ||
                         track->presentationComplete(framesWritten, audioHALFrames)) {
                     if (track->isStopping_2()) {
@@ -4678,7 +4825,7 @@
                     tracksToRemove->add(track);
                     // indicate to client process that the track was disabled because of underrun;
                     // it will then automatically call start() when data is available
-                    android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
+                    track->disable();
                 } else if (last) {
                     ALOGW("pause because of UNDERRUN, framesReady = %zu,"
                             "minFrames = %u, mFormat = %#x",
@@ -4733,7 +4880,10 @@
         buffer.frameCount = frameCount;
         status_t status = mActiveTrack->getNextBuffer(&buffer);
         if (status != NO_ERROR || buffer.raw == NULL) {
-            memset(curBuf, 0, frameCount * mFrameSize);
+            // no need to pad with 0 for compressed audio
+            if (audio_has_proportional_frames(mFormat)) {
+                memset(curBuf, 0, frameCount * mFrameSize);
+            }
             break;
         }
         memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
@@ -4760,7 +4910,7 @@
         } else {
             mSleepTimeUs = mIdleSleepTimeUs;
         }
-    } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) {
+    } else if (mBytesWritten != 0 && audio_has_proportional_frames(mFormat)) {
         memset(mSinkBuffer, 0, mFrameCount * mFrameSize);
         mSleepTimeUs = 0;
     }
@@ -4789,6 +4939,10 @@
     bool trackPaused = false;
     bool trackStopped = false;
 
+    if ((mType == DIRECT) && audio_is_linear_pcm(mFormat) && !usesHwAvSync()) {
+        return !mStandby;
+    }
+
     // do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack
     // after a timeout and we will enter standby then.
     if (mTracks.size() > 0) {
@@ -4802,7 +4956,7 @@
 
 // getTrackName_l() must be called with ThreadBase::mLock held
 int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask __unused,
-        audio_format_t format __unused, int sessionId __unused)
+        audio_format_t format __unused, audio_session_t sessionId __unused)
 {
     return 0;
 }
@@ -4867,10 +5021,10 @@
 uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs() const
 {
     uint32_t time;
-    if (audio_is_linear_pcm(mFormat)) {
+    if (audio_has_proportional_frames(mFormat)) {
         time = PlaybackThread::activeSleepTimeUs();
     } else {
-        time = 10000;
+        time = kDirectMinSleepTimeUs;
     }
     return time;
 }
@@ -4878,10 +5032,10 @@
 uint32_t AudioFlinger::DirectOutputThread::idleSleepTimeUs() const
 {
     uint32_t time;
-    if (audio_is_linear_pcm(mFormat)) {
+    if (audio_has_proportional_frames(mFormat)) {
         time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2;
     } else {
-        time = 10000;
+        time = kDirectMinSleepTimeUs;
     }
     return time;
 }
@@ -4889,10 +5043,10 @@
 uint32_t AudioFlinger::DirectOutputThread::suspendSleepTimeUs() const
 {
     uint32_t time;
-    if (audio_is_linear_pcm(mFormat)) {
+    if (audio_has_proportional_frames(mFormat)) {
         time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
     } else {
-        time = 10000;
+        time = kDirectMinSleepTimeUs;
     }
     return time;
 }
@@ -4906,7 +5060,7 @@
     // no delay on outputs with HW A/V sync
     if (usesHwAvSync()) {
         mStandbyDelayNs = 0;
-    } else if ((mType == OFFLOAD) && !audio_is_linear_pcm(mFormat)) {
+    } else if ((mType == OFFLOAD) && !audio_has_proportional_frames(mFormat)) {
         mStandbyDelayNs = kOffloadStandbyDelayNs;
     } else {
         mStandbyDelayNs = microseconds(mActiveSleepTimeUs*2);
@@ -5026,10 +5180,11 @@
 AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
         AudioStreamOut* output, audio_io_handle_t id, uint32_t device, bool systemReady)
     :   DirectOutputThread(audioFlinger, output, id, device, OFFLOAD, systemReady),
-        mPausedBytesRemaining(0)
+        mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
 {
     //FIXME: mStandby should be set to true by ThreadBase constructor
     mStandby = true;
+    mKeepWakeLock = property_get_bool("ro.audio.offload_wakelock", true /* default_value */);
 }
 
 void AudioFlinger::OffloadThread::threadLoop_exit()
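keepWakeLock() itself is not shown in this excerpt. A plausible reading, combining the new property with the drain-phase workaround whose inline version is removed further up, would be:

    // Sketch only: hold the wake lock when the property asks for it, or while an
    // offload drain is in flight (the case the removed comment said drops the
    // last buffers on battery if the lock is released).
    class OffloadThreadSketch {
    public:
        bool keepWakeLock() const { return mKeepWakeLock || (mDrainSequence & 1); }
    private:
        bool mKeepWakeLock = true;    // from ro.audio.offload_wakelock
        unsigned mDrainSequence = 0;  // odd while a drain is pending
    };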
@@ -5058,7 +5213,7 @@
     bool doHwPause = false;
     bool doHwResume = false;
 
-    ALOGV("OffloadThread::prepareTracks_l active tracks %d", count);
+    ALOGV("OffloadThread::prepareTracks_l active tracks %zu", count);
 
     // find out which tracks need to be processed
     for (size_t i = 0; i < count; i++) {
@@ -5068,7 +5223,9 @@
             continue;
         }
         Track* const track = t.get();
+#ifdef VERY_VERY_VERBOSE_LOGGING
         audio_track_cblk_t* cblk = track->cblk();
+#endif
         // Only consider last track started for volume and mixer state control.
         // In theory an older track could underrun and restart after the new one starts
         // but as we only care about the transition phase between two tracks on a
@@ -5106,6 +5263,11 @@
             }
             tracksToRemove->add(track);
         } else if (track->isFlushPending()) {
+            if (track->isStopping_1()) {
+                track->mRetryCount = kMaxTrackStopRetriesOffload;
+            } else {
+                track->mRetryCount = kMaxTrackRetriesOffload;
+            }
             track->flushAck();
             if (last) {
                 mFlushPending = true;
@@ -5166,38 +5328,47 @@
                 }
                 mPreviousTrack = track;
                 // reset retry count
-                track->mRetryCount = kMaxTrackRetriesOffload;
+                if (track->isStopping_1()) {
+                    track->mRetryCount = kMaxTrackStopRetriesOffload;
+                } else {
+                    track->mRetryCount = kMaxTrackRetriesOffload;
+                }
                 mActiveTrack = t;
                 mixerStatus = MIXER_TRACKS_READY;
             }
         } else {
             ALOGVV("OffloadThread: track %d s=%08x [NOT READY]", track->name(), cblk->mServer);
             if (track->isStopping_1()) {
-                // Hardware buffer can hold a large amount of audio so we must
-                // wait for all current track's data to drain before we say
-                // that the track is stopped.
-                if (mBytesRemaining == 0) {
-                    // Only start draining when all data in mixbuffer
-                    // has been written
-                    ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
-                    track->mState = TrackBase::STOPPING_2; // so presentation completes after drain
-                    // do not drain if no data was ever sent to HAL (mStandby == true)
-                    if (last && !mStandby) {
-                        // do not modify drain sequence if we are already draining. This happens
-                        // when resuming from pause after drain.
-                        if ((mDrainSequence & 1) == 0) {
-                            mSleepTimeUs = 0;
-                            mStandbyTimeNs = systemTime() + mStandbyDelayNs;
-                            mixerStatus = MIXER_DRAIN_TRACK;
-                            mDrainSequence += 2;
-                        }
-                        if (mHwPaused) {
-                            // It is possible to move from PAUSED to STOPPING_1 without
-                            // a resume so we must ensure hardware is running
-                            doHwResume = true;
-                            mHwPaused = false;
+                if (--(track->mRetryCount) <= 0) {
+                    // Hardware buffer can hold a large amount of audio so we must
+                    // wait for all current track's data to drain before we say
+                    // that the track is stopped.
+                    if (mBytesRemaining == 0) {
+                        // Only start draining when all data in mixbuffer
+                        // has been written
+                        ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
+                        track->mState = TrackBase::STOPPING_2; // so presentation completes after
+                        // drain; do not drain if no data was ever sent to HAL (mStandby == true)
+                        if (last && !mStandby) {
+                            // do not modify drain sequence if we are already draining. This happens
+                            // when resuming from pause after drain.
+                            if ((mDrainSequence & 1) == 0) {
+                                mSleepTimeUs = 0;
+                                mStandbyTimeNs = systemTime() + mStandbyDelayNs;
+                                mixerStatus = MIXER_DRAIN_TRACK;
+                                mDrainSequence += 2;
+                            }
+                            if (mHwPaused) {
+                                // It is possible to move from PAUSED to STOPPING_1 without
+                                // a resume so we must ensure hardware is running
+                                doHwResume = true;
+                                mHwPaused = false;
+                            }
                         }
                     }
+                } else if (last) {
+                    ALOGV("stopping1 underrun retries left %d", track->mRetryCount);
+                    mixerStatus = MIXER_TRACKS_ENABLED;
                 }
             } else if (track->isStopping_2()) {
                 // Drain has completed or we are in standby, signal presentation complete
@@ -5205,7 +5376,7 @@
                     track->mState = TrackBase::STOPPED;
                     size_t audioHALFrames =
                             (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
-                    size_t framesWritten =
+                    int64_t framesWritten =
                             mBytesWritten / mOutput->getFrameSize();
                     track->presentationComplete(framesWritten, audioHALFrames);
                     track->reset();
@@ -5220,7 +5391,7 @@
                     tracksToRemove->add(track);
                     // indicate to client process that the track was disabled because of underrun;
                     // it will then automatically call start() when data is available
-                    android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
+                    track->disable();
                 } else if (last){
                     mixerStatus = MIXER_TRACKS_ENABLED;
                 }
@@ -5275,6 +5446,8 @@
     mBytesRemaining = 0;
     mPausedWriteLength = 0;
     mPausedBytesRemaining = 0;
+    // reset bytes written count to reflect that DSP buffers are empty after flush.
+    mBytesWritten = 0;
 
     if (mUseAsyncWrite) {
         // discard any pending drain or write ack by incrementing sequence
@@ -5286,6 +5459,14 @@
     }
 }
 
+void AudioFlinger::OffloadThread::invalidateTracks(audio_stream_type_t streamType)
+{
+    Mutex::Autolock _l(mLock);
+    if (PlaybackThread::invalidateTracks_l(streamType)) {
+        mFlushPending = true;
+    }
+}
+
 // ----------------------------------------------------------------------------
 
 AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
@@ -5308,7 +5489,7 @@
 {
     // mix buffers...
     if (outputsReady(outputTracks)) {
-        mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
+        mAudioMixer->process();
     } else {
         if (mMixerBufferValid) {
             memset(mMixerBuffer, 0, mMixerBufferSize);
@@ -5505,7 +5686,12 @@
     mInputSource = new AudioStreamInSource(input->stream);
     size_t numCounterOffers = 0;
     const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
-    ssize_t index = mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
+#if !LOG_NDEBUG
+    ssize_t index =
+#else
+    (void)
+#endif
+            mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
     ALOG_ASSERT(index == 0);
 
     // initialize fast capture depending on configuration
@@ -5583,7 +5769,7 @@
         // start the fast capture
         mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
         pid_t tid = mFastCapture->getTid();
-        sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
+        sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture);
 #ifdef AUDIO_WATCHDOG
         // FIXME
 #endif
@@ -5848,15 +6034,17 @@
         if (mPipeSource != 0) {
             size_t framesToRead = mBufferSize / mFrameSize;
             framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
-                    framesToRead, AudioBufferProvider::kInvalidPTS);
+                    framesToRead);
             if (framesRead == 0) {
                 // since pipe is non-blocking, simulate blocking input
                 sleepUs = (framesToRead * 1000000LL) / mSampleRate;
             }
         // otherwise use the HAL / AudioStreamIn directly
         } else {
+            ATRACE_BEGIN("read");
             ssize_t bytesRead = mInput->stream->read(mInput->stream,
                     (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize);
+            ATRACE_END();
             if (bytesRead < 0) {
                 framesRead = bytesRead;
             } else {
@@ -5864,8 +6052,30 @@
             }
         }
 
+        // Update server timestamp with server stats
+        // systemTime() is optional if the hardware supports timestamps.
+        mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
+        mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
+
+        // Update server timestamp with kernel stats
+        if (mInput->stream->get_capture_position != nullptr) {
+            int64_t position, time;
+            int ret = mInput->stream->get_capture_position(mInput->stream, &position, &time);
+            if (ret == NO_ERROR) {
+                mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
+                mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
+                // Note: In general record buffers should tend to be empty in
+                // a properly running pipeline.
+                //
+                // Also, it is not advantageous to call get_capture_position during the read
+                // as the read obtains a lock, preventing the timestamp call from executing.
+            }
+        }
+        // Use this to track timestamp information
+        // ALOGD("%s", mTimestamp.toString().c_str());
+
         if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) {
-            ALOGE("read failed: framesRead=%d", framesRead);
+            ALOGE("read failed: framesRead=%zd", framesRead);
             // Force input into standby so that it tries to recover at next read attempt
             inputStandBy();
             sleepUs = kRecordThreadSleepUs;
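For reference, the shape of data a HAL feeds into the get_capture_position hook used in the hunk above; the signature in the comment follows the stream-in HAL interface, while the frame-counter source is hypothetical:

    #include <stdint.h>
    #include <time.h>

    // HAL hook: int (*get_capture_position)(const struct audio_stream_in *stream,
    //                                       int64_t *frames, int64_t *time);
    static int get_capture_position_sketch(int64_t framesCapturedByDriver,
                                           int64_t *frames, int64_t *time)
    {
        *frames = framesCapturedByDriver;  // monotonically increasing frame count
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        *time = (int64_t) ts.tv_sec * 1000000000LL + ts.tv_nsec;
        return 0;
    }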
@@ -5962,7 +6172,8 @@
                                   (activeTrack->mFramesToDrop >= 0) ? "timed out" : "cancelled",
                                   activeTrack->sessionId(),
                                   (activeTrack->mSyncStartEvent != 0) ?
-                                          activeTrack->mSyncStartEvent->triggerSession() : 0);
+                                          activeTrack->mSyncStartEvent->triggerSession() :
+                                          AUDIO_SESSION_NONE);
                             activeTrack->clearSyncStartEvent();
                         }
                     }
@@ -5992,6 +6203,11 @@
                 break;
             }
 
+            // update frame information and push timestamp out
+            activeTrack->updateTrackFrameInfo(
+                    activeTrack->mServerProxy->framesReleased(),
+                    mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER],
+                    mSampleRate, mTimestamp);
         }
 
 unlock:
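
updateTrackFrameInfo() records a (track frames released, sink frames written) pair so that later sink-side positions can be translated back into each track's own frame count. The real map is not shown in this diff; a hypothetical bounded history with a simple translation might look like:

    #include <cstdint>
    #include <deque>
    #include <utility>

    // Hypothetical stand-in for the frame map used by updateTrackFrameInfo().
    class TrackSinkFrameMap {
    public:
        void push(int64_t trackFrames, int64_t sinkFrames) {
            mPairs.emplace_back(trackFrames, sinkFrames);
            if (mPairs.size() > 16) mPairs.pop_front();  // bounded history
        }
        // Translate a sink position into track frames using the most recent
        // pair at or before it; exact interpolation is a refinement left out.
        int64_t sinkToTrack(int64_t sinkFrames) const {
            int64_t track = 0, sink = 0;
            for (const auto &p : mPairs) {
                if (p.second > sinkFrames) break;
                track = p.first;
                sink = p.second;
            }
            return track + (sinkFrames - sink);  // assumes 1:1 past the anchor
        }
    private:
        std::deque<std::pair<int64_t, int64_t>> mPairs;
    };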
@@ -6063,7 +6279,7 @@
         audio_format_t format,
         audio_channel_mask_t channelMask,
         size_t *pFrameCount,
-        int sessionId,
+        audio_session_t sessionId,
         size_t *notificationFrames,
         int uid,
         IAudioFlinger::track_flags_t *flags,
@@ -6084,21 +6300,21 @@
             ((frameCount == 0) || (frameCount == mPipeFramesP2)) &&
             // PCM data
             audio_is_linear_pcm(format) &&
-            // native format
+            // hardware format
             (format == mFormat) &&
-            // native channel mask
+            // hardware channel mask
             (channelMask == mChannelMask) &&
-            // native hardware sample rate
+            // hardware sample rate
             (sampleRate == mSampleRate) &&
             // record thread has an associated fast capture
             hasFastCapture() &&
             // there are sufficient fast track slots available
             mFastTrackAvail
         ) {
-        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%u mFrameCount=%u",
+        ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
                 frameCount, mFrameCount);
       } else {
-        ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%u mFrameCount=%u mPipeFramesP2=%u "
+        ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%zu mFrameCount=%zu mPipeFramesP2=%zu "
                 "format=%#x isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
                 "hasFastCapture=%d tid=%d mFastTrackAvail=%d",
                 frameCount, mFrameCount, mPipeFramesP2,
@@ -6182,7 +6398,7 @@
 
 status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack,
                                            AudioSystem::sync_event_t event,
-                                           int triggerSession)
+                                           audio_session_t triggerSession)
 {
     ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
     sp<ThreadBase> strongMe = this;
@@ -6229,7 +6445,7 @@
         status_t status = NO_ERROR;
         if (recordTrack->isExternalTrack()) {
             mLock.unlock();
-            status = AudioSystem::startInput(mId, (audio_session_t)recordTrack->sessionId());
+            status = AudioSystem::startInput(mId, recordTrack->sessionId());
             mLock.lock();
             // FIXME should verify that recordTrack is still in mActiveTracks
             if (status != NO_ERROR) {
@@ -6261,7 +6477,7 @@
 
 startError:
     if (recordTrack->isExternalTrack()) {
-        AudioSystem::stopInput(mId, (audio_session_t)recordTrack->sessionId());
+        AudioSystem::stopInput(mId, recordTrack->sessionId());
     }
     recordTrack->clearSyncStartEvent();
     // FIXME I wonder why we do not reset the state here?
@@ -6315,7 +6531,7 @@
         return BAD_VALUE;
     }
 
-    int eventSession = event->triggerSession();
+    audio_session_t eventSession = event->triggerSession();
     status_t ret = NAME_NOT_FOUND;
 
     Mutex::Autolock _l(mLock);
@@ -6373,9 +6589,13 @@
     dprintf(fd, "  Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
     dprintf(fd, "  Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
 
-    //  Make a non-atomic copy of fast capture dump state so it won't change underneath us
-    const FastCaptureDumpState copy(mFastCaptureDumpState);
-    copy.dump(fd);
+    // Make a non-atomic copy of fast capture dump state so it won't change underneath us
+    // while we are dumping it.  It may be inconsistent, but it won't mutate!
+    // This is a large object so we place it on the heap.
+    // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+    const FastCaptureDumpState *copy = new FastCaptureDumpState(mFastCaptureDumpState);
+    copy->dump(fd);
+    delete copy;
 }
 
 void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
@@ -6387,9 +6607,9 @@
     size_t numtracks = mTracks.size();
     size_t numactive = mActiveTracks.size();
     size_t numactiveseen = 0;
-    dprintf(fd, "  %d Tracks", numtracks);
+    dprintf(fd, "  %zu Tracks", numtracks);
     if (numtracks) {
-        dprintf(fd, " of which %d are active\n", numactive);
+        dprintf(fd, " of which %zu are active\n", numactive);
         RecordTrack::appendDumpHeader(result);
         for (size_t i = 0; i < numtracks ; ++i) {
             sp<RecordTrack> track = mTracks[i];
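
The dumpInternals() change above copies the large FastCaptureDumpState onto the heap instead of the stack; the FIXME notes that even this copy touches pages the dump never reads. Under the same assumption that the copy must be non-atomic but immutable, std::unique_ptr expresses the ownership without the manual delete:

    #include <cstdio>
    #include <memory>

    // Stand-in for the real (large) FastCaptureDumpState.
    struct FastCaptureDumpState {
        // ... many pages of counters in the real struct ...
        void dump(int fd) const { dprintf(fd, "fast capture state\n"); }
    };

    void dumpFastCaptureState(int fd, const FastCaptureDumpState &live) {
        // Non-atomic copy on the heap: it may be internally inconsistent,
        // but it cannot mutate while being printed, and it stays off the
        // calling thread's stack.
        const auto copy = std::make_unique<FastCaptureDumpState>(live);
        copy->dump(fd);
    }   // the unique_ptr frees the copy on every exit path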
@@ -6466,7 +6686,7 @@
 
 // AudioBufferProvider interface
 status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer(
-        AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
+        AudioBufferProvider::Buffer* buffer)
 {
     sp<ThreadBase> threadBase = mRecordTrack->mThread.promote();
     if (threadBase == 0) {
@@ -6567,7 +6787,7 @@
         AudioBufferProvider::Buffer buffer;
         for (size_t i = frames; i > 0; ) {
             buffer.frameCount = i;
-            status_t status = provider->getNextBuffer(&buffer, 0);
+            status_t status = provider->getNextBuffer(&buffer);
             if (status != OK || buffer.frameCount == 0) {
                 frames -= i; // cannot fill request.
                 break;
@@ -6782,6 +7002,10 @@
 
     AudioParameter param = AudioParameter(keyValuePair);
     int value;
+
+    // scope for AutoPark extends to end of method
+    AutoPark<FastCapture> park(mFastCapture);
+
     // TODO Investigate when this code runs. Check with audio policy when a sample rate and
     //      channel count change can be requested. Do we mandate the first client defines the
     //      HAL sampling rate and channel count or do we allow changes on the fly?
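
AutoPark is used here as a scope guard: constructing it idles the fast capture before the parameter change, and its destructor restores the previous state on every return path from checkForNewParameter_l(). A hedged sketch of that RAII shape, with park()/unpark() as placeholder names rather than the real FastThread state-queue protocol:

    // Hedged sketch of an AutoPark-style scope guard; park()/unpark() are
    // placeholders, not the real FastCapture/FastMixer API.
    template <typename Fast>
    class AutoParkSketch {
    public:
        explicit AutoParkSketch(Fast *fast) : mFast(fast) {
            if (mFast != nullptr) mFast->park();      // idle the fast thread
        }
        ~AutoParkSketch() {
            if (mFast != nullptr) mFast->unpark();    // restore on every path
        }
        AutoParkSketch(const AutoParkSketch&) = delete;
        AutoParkSketch& operator=(const AutoParkSketch&) = delete;
    private:
        Fast *const mFast;
    };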
@@ -6912,6 +7136,7 @@
         desc->mSamplingRate = mSampleRate;
         desc->mFormat = mFormat;
         desc->mFrameCount = mFrameCount;
+        desc->mFrameCountHAL = mFrameCount;
         desc->mLatency = 0;
         break;
 
@@ -6977,7 +7202,7 @@
     return mInput->stream->get_input_frames_lost(mInput->stream);
 }
 
-uint32_t AudioFlinger::RecordThread::hasAudioSession(int sessionId) const
+uint32_t AudioFlinger::RecordThread::hasAudioSession(audio_session_t sessionId) const
 {
     Mutex::Autolock _l(mLock);
     uint32_t result = 0;
@@ -6995,13 +7220,13 @@
     return result;
 }
 
-KeyedVector<int, bool> AudioFlinger::RecordThread::sessionIds() const
+KeyedVector<audio_session_t, bool> AudioFlinger::RecordThread::sessionIds() const
 {
-    KeyedVector<int, bool> ids;
+    KeyedVector<audio_session_t, bool> ids;
     Mutex::Autolock _l(mLock);
     for (size_t j = 0; j < mTracks.size(); ++j) {
         sp<RecordThread::RecordTrack> track = mTracks[j];
-        int sessionId = track->sessionId();
+        audio_session_t sessionId = track->sessionId();
         if (ids.indexOfKey(sessionId) < 0) {
             ids.add(sessionId, true);
         }
@@ -7053,7 +7278,7 @@
 {
     ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this);
     ALOGW_IF(mEffectChains.size() != 1,
-            "removeEffectChain_l() %p invalid chain size %d on thread %p",
+            "removeEffectChain_l() %p invalid chain size %zu on thread %p",
             chain.get(), mEffectChains.size(), this);
     if (mEffectChains.size() == 1) {
         mEffectChains.removeAt(0);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 46ac300..787b5c4 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -247,6 +247,10 @@
                 // Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
                 // and returns the [normal mix] buffer's frame count.
     virtual     size_t      frameCount() const = 0;
+
+                // Returns the HAL's frame count, i.e. the fast mixer buffer size.
+                size_t      frameCountHAL() const { return mFrameCount; }
+
                 size_t      frameSize() const { return mFrameSize; }
 
     // Should be "virtual status_t requestExitAndWait()" and override same
@@ -288,7 +292,7 @@
                                     const sp<AudioFlinger::Client>& client,
                                     const sp<IEffectClient>& effectClient,
                                     int32_t priority,
-                                    int sessionId,
+                                    audio_session_t sessionId,
                                     effect_descriptor_t *desc,
                                     int *enabled,
                                     status_t *status /*non-NULL*/);
@@ -302,9 +306,9 @@
                 };
 
                 // get effect chain corresponding to session Id.
-                sp<EffectChain> getEffectChain(int sessionId);
+                sp<EffectChain> getEffectChain(audio_session_t sessionId);
                 // same as getEffectChain() but must be called with ThreadBase mutex locked
-                sp<EffectChain> getEffectChain_l(int sessionId) const;
+                sp<EffectChain> getEffectChain_l(audio_session_t sessionId) const;
                 // add an effect chain to the chain list (mEffectChains)
     virtual     status_t addEffectChain_l(const sp<EffectChain>& chain) = 0;
                 // remove an effect chain from the chain list (mEffectChains)
@@ -321,8 +325,8 @@
                 // set audio mode to all effect chains
                 void setMode(audio_mode_t mode);
                 // get effect module with corresponding ID on specified audio session
-                sp<AudioFlinger::EffectModule> getEffect(int sessionId, int effectId);
-                sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId);
+                sp<AudioFlinger::EffectModule> getEffect(audio_session_t sessionId, int effectId);
+                sp<AudioFlinger::EffectModule> getEffect_l(audio_session_t sessionId, int effectId);
                 // add an effect module. Also creates the effect chain if none exists for
                 // the effect's audio session
                 status_t addEffect_l(const sp< EffectModule>& effect);
@@ -333,24 +337,27 @@
     virtual     void detachAuxEffect_l(int effectId __unused) {}
                 // returns either EFFECT_SESSION if effects on this audio session exist in one
                 // chain, or TRACK_SESSION if tracks on this audio session exist, or both
-                virtual uint32_t hasAudioSession(int sessionId) const = 0;
+                virtual uint32_t hasAudioSession(audio_session_t sessionId) const = 0;
                 // the value returned by default implementation is not important as the
                 // strategy is only meaningful for PlaybackThread which implements this method
-                virtual uint32_t getStrategyForSession_l(int sessionId __unused) { return 0; }
+                virtual uint32_t getStrategyForSession_l(audio_session_t sessionId __unused)
+                        { return 0; }
 
                 // suspend or restore effect according to the type of effect passed. a NULL
                 // type pointer means suspend all effects in the session
                 void setEffectSuspended(const effect_uuid_t *type,
                                         bool suspend,
-                                        int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+                                        audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX);
                 // check if some effects must be suspended/restored when an effect is enabled
                 // or disabled
                 void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
                                                  bool enabled,
-                                                 int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+                                                 audio_session_t sessionId =
+                                                        AUDIO_SESSION_OUTPUT_MIX);
                 void checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
                                                    bool enabled,
-                                                   int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+                                                   audio_session_t sessionId =
+                                                        AUDIO_SESSION_OUTPUT_MIX);
 
                 virtual status_t    setSyncEvent(const sp<SyncEvent>& event) = 0;
                 virtual bool        isValidSyncEvent(const sp<SyncEvent>& event) const = 0;
@@ -381,7 +388,7 @@
                 };
 
                 void        acquireWakeLock(int uid = -1);
-                void        acquireWakeLock_l(int uid = -1);
+                virtual void acquireWakeLock_l(int uid = -1);
                 void        releaseWakeLock();
                 void        releaseWakeLock_l();
                 void        updateWakeLockUids(const SortedVector<int> &uids);
@@ -389,17 +396,19 @@
                 void        getPowerManager_l();
                 void setEffectSuspended_l(const effect_uuid_t *type,
                                           bool suspend,
-                                          int sessionId);
+                                          audio_session_t sessionId);
                 // updated mSuspendedSessions when an effect suspended or restored
                 void        updateSuspendedSessions_l(const effect_uuid_t *type,
                                                       bool suspend,
-                                                      int sessionId);
+                                                      audio_session_t sessionId);
                 // check if some effects must be suspended when an effect chain is added
                 void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
 
                 String16 getWakeLockTag();
 
     virtual     void        preExit() { }
+    virtual     void        setMasterMono_l(bool mono __unused) { }
+    virtual     bool        requireMonoBlend() { return false; }
 
     friend class AudioFlinger;      // for mEffectChains
 
@@ -450,13 +459,15 @@
                 sp<IPowerManager>       mPowerManager;
                 sp<IBinder>             mWakeLockToken;
                 const sp<PMDeathRecipient> mDeathRecipient;
-                // list of suspended effects per session and per type. The first vector is
-                // keyed by session ID, the second by type UUID timeLow field
-                KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > >
+                // list of suspended effects per session and per type. The first (outer) vector is
+                // keyed by session ID, the second (inner) by type UUID timeLow field
+                KeyedVector< audio_session_t, KeyedVector< int, sp<SuspendedSessionDesc> > >
                                         mSuspendedSessions;
                 static const size_t     kLogSize = 4 * 1024;
                 sp<NBLog::Writer>       mNBLogWriter;
                 bool                    mSystemReady;
+                bool                    mNotifiedBatteryStart;
+                ExtendedTimestamp       mTimestamp;
 };
 
 // --- PlaybackThread ---
@@ -477,9 +488,11 @@
 
     // retry count before removing active track in case of underrun on offloaded thread:
     // we need to make sure that AudioTrack client has enough time to send large buffers
-//FIXME may be more appropriate if expressed in time units. Need to revise how underrun is handled
-    // for offloaded tracks
+    //FIXME may be more appropriate if expressed in time units. Need to revise how underrun is
+    // handled for offloaded tracks
     static const int8_t kMaxTrackRetriesOffload = 20;
+    static const int8_t kMaxTrackStartupRetriesOffload = 100;
+    static const int8_t kMaxTrackStopRetriesOffload = 2;
 
     PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
                    audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady);
@@ -525,6 +538,8 @@
     // ThreadBase virtuals
     virtual     void        preExit();
 
+    virtual     bool        keepWakeLock() const { return true; }
+
 public:
 
     virtual     status_t    initCheck() const { return (mOutput == NULL) ? NO_INIT : NO_ERROR; }
@@ -550,7 +565,7 @@
                                 audio_channel_mask_t channelMask,
                                 size_t *pFrameCount,
                                 const sp<IMemory>& sharedBuffer,
-                                int sessionId,
+                                audio_session_t sessionId,
                                 IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
                                 int uid,
@@ -590,21 +605,19 @@
 
                 virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
                 virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
-                virtual uint32_t hasAudioSession(int sessionId) const;
-                virtual uint32_t getStrategyForSession_l(int sessionId);
+                virtual uint32_t hasAudioSession(audio_session_t sessionId) const;
+                virtual uint32_t getStrategyForSession_l(audio_session_t sessionId);
 
 
                 virtual status_t setSyncEvent(const sp<SyncEvent>& event);
                 virtual bool     isValidSyncEvent(const sp<SyncEvent>& event) const;
 
                 // called with AudioFlinger lock held
-                        void     invalidateTracks(audio_stream_type_t streamType);
+                        bool     invalidateTracks_l(audio_stream_type_t streamType);
+                virtual void     invalidateTracks(audio_stream_type_t streamType);
 
     virtual     size_t      frameCount() const { return mNormalFrameCount; }
 
-                // Return's the HAL's frame count i.e. fast mixer buffer size.
-                size_t      frameCountHAL() const { return mFrameCount; }
-
                 status_t    getTimestamp_l(AudioTimestamp& timestamp);
 
                 void        addPatchTrack(const sp<PatchTrack>& track);
@@ -688,9 +701,8 @@
     // 'volatile' means accessed via atomic operations and no lock.
     volatile int32_t                mSuspended;
 
-    // FIXME overflows every 6+ hours at 44.1 kHz stereo 16-bit samples
-    // mFramesWritten would be better, or 64-bit even better
-    size_t                          mBytesWritten;
+    int64_t                         mBytesWritten;
+    int64_t                         mFramesWritten; // not reset on standby
 private:
     // mMasterMute is in both PlaybackThread and in AudioFlinger.  When a
     // PlaybackThread needs to find out if master-muted, it checks it's local
@@ -706,7 +718,7 @@
     // Allocate a track name for a given channel mask.
     //   Returns name >= 0 if successful, -1 on failure.
     virtual int             getTrackName_l(audio_channel_mask_t channelMask,
-                                           audio_format_t format, int sessionId) = 0;
+                                           audio_format_t format, audio_session_t sessionId) = 0;
     virtual void            deleteTrackName_l(int name) = 0;
 
     // Time to sleep between cycles when:
@@ -838,19 +850,6 @@
                 bool        mHwSupportsPause;
                 bool        mHwPaused;
                 bool        mFlushPending;
-private:
-    // timestamp latch:
-    //  D input is written by threadLoop_write while mutex is unlocked, and read while locked
-    //  Q output is written while locked, and read while locked
-    struct {
-        AudioTimestamp  mTimestamp;
-        uint32_t        mUnpresentedFrames;
-        KeyedVector<Track *, uint32_t> mFramesReleased;
-    } mLatchD, mLatchQ;
-    bool mLatchDValid;  // true means mLatchD is valid
-                        //     (except for mFramesReleased which is filled in later),
-                        //     and clock it into latch at next opportunity
-    bool mLatchQValid;  // true means mLatchQ is valid
 };
 
 class MixerThread : public PlaybackThread {
@@ -872,12 +871,20 @@
 protected:
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
     virtual     int         getTrackName_l(audio_channel_mask_t channelMask,
-                                           audio_format_t format, int sessionId);
+                                           audio_format_t format, audio_session_t sessionId);
     virtual     void        deleteTrackName_l(int name);
     virtual     uint32_t    idleSleepTimeUs() const;
     virtual     uint32_t    suspendSleepTimeUs() const;
     virtual     void        cacheParameters_l();
 
+    virtual void acquireWakeLock_l(int uid = -1) {
+        PlaybackThread::acquireWakeLock_l(uid);
+        if (hasFastMixer()) {
+            mFastMixer->setBoottimeOffset(
+                    mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME]);
+        }
+    }
+
     // threadLoop snippets
     virtual     ssize_t     threadLoop_write();
     virtual     void        threadLoop_standby();
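
The acquireWakeLock_l() override above pushes a boottime offset into the FastMixer so its timestamps can be expressed on both timebases. The diff does not show how mTimebaseOffset is derived; one plausible derivation is the difference between CLOCK_BOOTTIME and CLOCK_MONOTONIC, which grows while the device is suspended:

    #include <cstdint>
    #include <time.h>

    static int64_t toNs(const timespec &ts) {
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    // Sketch: boottime advances during suspend, monotonic does not, so this
    // offset lets a monotonic timestamp be re-expressed on the boottime base.
    int64_t boottimeMinusMonotonicNs() {
        timespec mono, boot;
        clock_gettime(CLOCK_MONOTONIC, &mono);
        clock_gettime(CLOCK_BOOTTIME, &boot);
        return toNs(boot) - toNs(mono);  // tiny race between the two reads
    }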
@@ -908,13 +915,25 @@
                 //          mFastMixer->sq()    // for mutating and pushing state
                 int32_t     mFastMixerFutex;    // for cold idle
 
+                std::atomic_bool mMasterMono;
 public:
     virtual     bool        hasFastMixer() const { return mFastMixer != 0; }
     virtual     FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const {
-                              ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks);
+                              ALOG_ASSERT(fastIndex < FastMixerState::sMaxFastTracks);
                               return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
                             }
 
+protected:
+    virtual     void       setMasterMono_l(bool mono) {
+                               mMasterMono.store(mono);
+                               if (mFastMixer != nullptr) { /* hasFastMixer() */
+                                   mFastMixer->setMasterMono(mMasterMono);
+                               }
+                           }
+                // the FastMixer performs mono blend if it exists.
+                // Blending with limiter is not idempotent,
+                // and blending without limiter is idempotent but inefficient to do twice.
+    virtual     bool       requireMonoBlend() { return mMasterMono.load() && !hasFastMixer(); }
 };
 
 class DirectOutputThread : public PlaybackThread {
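
To unpack the idempotency remark in the MixerThread hunk above: a limiter-free mono blend simply averages each stereo frame, so running it twice leaves the data unchanged (averaging two equal samples is a no-op) but burns cycles, while a blend with a limiter applied twice would alter the signal. A sketch of the limiter-free form for interleaved stereo float PCM:

    #include <cstddef>

    // Sketch: in-place mono blend of interleaved stereo float PCM.
    // Applying this twice yields the same buffer (idempotent), but the
    // second pass is wasted work -- hence requireMonoBlend() returns false
    // when a FastMixer has already done the blend.
    void monoBlend(float *buffer, size_t frames) {
        for (size_t i = 0; i < frames; ++i) {
            const float mono = 0.5f * (buffer[2 * i] + buffer[2 * i + 1]);
            buffer[2 * i] = mono;
            buffer[2 * i + 1] = mono;
        }
    }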
@@ -932,7 +951,7 @@
 
 protected:
     virtual     int         getTrackName_l(audio_channel_mask_t channelMask,
-                                           audio_format_t format, int sessionId);
+                                           audio_format_t format, audio_session_t sessionId);
     virtual     void        deleteTrackName_l(int name);
     virtual     uint32_t    activeSleepTimeUs() const;
     virtual     uint32_t    idleSleepTimeUs() const;
@@ -981,10 +1000,14 @@
 
     virtual     bool        waitingAsyncCallback();
     virtual     bool        waitingAsyncCallback_l();
+    virtual     void        invalidateTracks(audio_stream_type_t streamType);
+
+    virtual     bool        keepWakeLock() const { return mKeepWakeLock; }
 
 private:
     size_t      mPausedWriteLength;     // length in bytes of write interrupted by pause
     size_t      mPausedBytesRemaining;  // bytes still waiting in mixbuffer after resume
+    bool        mKeepWakeLock;          // keep wake lock while waiting for write callback
 };
 
 class AsyncCallbackThread : public Thread {
@@ -1096,7 +1119,7 @@
         virtual void sync(size_t *framesAvailable = NULL, bool *hasOverrun = NULL);
 
         // AudioBufferProvider interface
-        virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
+        virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer);
         virtual void        releaseBuffer(AudioBufferProvider::Buffer* buffer);
     private:
         RecordTrack * const mRecordTrack;
@@ -1232,7 +1255,7 @@
                     audio_format_t format,
                     audio_channel_mask_t channelMask,
                     size_t *pFrameCount,
-                    int sessionId,
+                    audio_session_t sessionId,
                     size_t *notificationFrames,
                     int uid,
                     IAudioFlinger::track_flags_t *flags,
@@ -1241,7 +1264,7 @@
 
             status_t    start(RecordTrack* recordTrack,
                               AudioSystem::sync_event_t event,
-                              int triggerSession);
+                              audio_session_t triggerSession);
 
             // ask the thread to stop the specified track, and
             // return true if the caller should then do its part of the stopping process
@@ -1269,12 +1292,12 @@
 
     virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
     virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
-    virtual uint32_t hasAudioSession(int sessionId) const;
+    virtual uint32_t hasAudioSession(audio_session_t sessionId) const;
 
             // Return the set of unique session IDs across all tracks.
             // The keys are the session IDs, and the associated values are meaningless.
             // FIXME replace by Set [and implement Bag/Multiset for other uses].
-            KeyedVector<int, bool> sessionIds() const;
+            KeyedVector<audio_session_t, bool> sessionIds() const;
 
     virtual status_t setSyncEvent(const sp<SyncEvent>& event);
     virtual bool     isValidSyncEvent(const sp<SyncEvent>& event) const;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 98bf96e..67a5e58 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -50,7 +50,6 @@
 
     enum track_type {
         TYPE_DEFAULT,
-        TYPE_TIMED,
         TYPE_OUTPUT,
         TYPE_PATCH,
     };
@@ -62,7 +61,7 @@
                                 audio_channel_mask_t channelMask,
                                 size_t frameCount,
                                 void *buffer,
-                                int sessionId,
+                                audio_session_t sessionId,
                                 int uid,
                                 IAudioFlinger::track_flags_t flags,
                                 bool isOut,
@@ -72,18 +71,17 @@
     virtual status_t    initCheck() const;
 
     virtual status_t    start(AudioSystem::sync_event_t event,
-                             int triggerSession) = 0;
+                             audio_session_t triggerSession) = 0;
     virtual void        stop() = 0;
             sp<IMemory> getCblk() const { return mCblkMemory; }
             audio_track_cblk_t* cblk() const { return mCblk; }
-            int         sessionId() const { return mSessionId; }
+            audio_session_t sessionId() const { return mSessionId; }
             int         uid() const { return mUid; }
     virtual status_t    setSyncEvent(const sp<SyncEvent>& event);
 
             sp<IMemory> getBuffers() const { return mBufferMemory; }
             void*       buffer() const { return mBuffer; }
             bool        isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
-            bool        isTimedTrack() const { return (mType == TYPE_TIMED); }
             bool        isOutputTrack() const { return (mType == TYPE_OUTPUT); }
             bool        isPatchTrack() const { return (mType == TYPE_PATCH); }
             bool        isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
@@ -93,7 +91,7 @@
                         TrackBase& operator = (const TrackBase&);
 
     // AudioBufferProvider interface
-    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) = 0;
+    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) = 0;
     virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
 
     // ExtendedAudioBufferProvider interface is only needed for Track,
@@ -132,7 +130,7 @@
     }
 
     bool isOut() const { return mIsOut; }
-                                    // true for Track and TimedTrack, false for RecordTrack,
+                                    // true for Track, false for RecordTrack,
                                     // this could be a track type if needed later
 
     const wp<ThreadBase> mThread;
@@ -155,7 +153,7 @@
     const size_t        mFrameCount;// size of track buffer given at createTrack() or
                                     // openRecord(), and then adjusted as needed
 
-    const int           mSessionId;
+    const audio_session_t mSessionId;
     int                 mUid;
     Vector < sp<SyncEvent> >mSyncEvents;
     const IAudioFlinger::track_flags_t mFlags;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 0e24b52..364e339 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -27,9 +27,6 @@
 
 #include <private/media/AudioTrackShared.h>
 
-#include <common_time/cc_helper.h>
-#include <common_time/local_clock.h>
-
 #include "AudioMixer.h"
 #include "AudioFlinger.h"
 #include "ServiceUtilities.h"
@@ -53,6 +50,10 @@
 #define ALOGVV(a...) do { } while(0)
 #endif
 
+// TODO move to a common header  (Also shared with AudioTrack.cpp)
+#define NANOS_PER_SECOND    1000000000
+#define TIME_TO_NANOS(time) ((uint64_t)(time).tv_sec * NANOS_PER_SECOND + (time).tv_nsec)
+
 namespace android {
 
 // ----------------------------------------------------------------------------
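
TIME_TO_NANOS() flattens a timespec into a single 64-bit nanosecond count (the macro argument is parenthesized above so it also works on expressions). A small, self-contained usage sketch; the clock choice is illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <time.h>

    #define NANOS_PER_SECOND    1000000000
    #define TIME_TO_NANOS(time) ((uint64_t)(time).tv_sec * NANOS_PER_SECOND + (time).tv_nsec)

    int main() {
        timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);
        printf("monotonic ns: %llu\n", (unsigned long long)TIME_TO_NANOS(now));
        return 0;
    }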
@@ -70,7 +71,7 @@
             audio_channel_mask_t channelMask,
             size_t frameCount,
             void *buffer,
-            int sessionId,
+            audio_session_t sessionId,
             int clientUid,
             IAudioFlinger::track_flags_t flags,
             bool isOut,
@@ -88,7 +89,7 @@
         mChannelCount(isOut ?
                 audio_channel_count_from_out_mask(channelMask) :
                 audio_channel_count_from_in_mask(channelMask)),
-        mFrameSize(audio_is_linear_pcm(format) ?
+        mFrameSize(audio_has_proportional_frames(format) ?
                 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
         mFrameCount(frameCount),
         mSessionId(sessionId),
@@ -100,13 +101,11 @@
         mType(type),
         mThreadIoHandle(thread->id())
 {
-    // if the caller is us, trust the specified uid
-    if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
-        int newclientUid = IPCThreadState::self()->getCallingUid();
-        if (clientUid != -1 && clientUid != newclientUid) {
-            ALOGW("uid %d tried to pass itself off as %d", newclientUid, clientUid);
-        }
-        clientUid = newclientUid;
+    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+    if (!isTrustedCallingUid(callingUid) || clientUid == -1) {
+        ALOGW_IF(clientUid != -1 && clientUid != (int)callingUid,
+                "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
+        clientUid = (int)callingUid;
     }
     // clientUid contains the uid of the app that is responsible for this track, so we can blame
     // battery usage on it.
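
The constructor hunk above now accepts a caller-supplied clientUid only from a trusted binder caller, logging and overriding any spoof attempt otherwise. isTrustedCallingUid() lives in ServiceUtilities and is not part of this diff; a hypothetical sketch of the shape such a predicate might take (uid values as defined in android_filesystem_config.h):

    #include <sys/types.h>  // uid_t

    // Hypothetical: real values come from private/android_filesystem_config.h.
    enum : uid_t { kAidRoot = 0, kAidMedia = 1013, kAidAudioserver = 1041 };

    // Assumption: "trusted" means a platform service uid allowed to attribute
    // a track to another app's uid; the real predicate may check more cases.
    static bool isTrustedCallingUidSketch(uid_t uid) {
        return uid == kAidRoot || uid == kAidMedia || uid == kAidAudioserver;
    }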
@@ -123,7 +122,7 @@
         mCblkMemory = client->heap()->allocate(size);
         if (mCblkMemory == 0 ||
                 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
-            ALOGE("not enough memory for AudioTrack size=%u", size);
+            ALOGE("not enough memory for AudioTrack size=%zu", size);
             client->heap()->dump("AudioTrack");
             mCblkMemory.clear();
             return;
@@ -244,7 +243,7 @@
 
 // AudioBufferProvider interface
 // getNextBuffer() = 0;
-// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
+// This implementation of releaseBuffer() is used by Track and RecordTrack
 void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
 {
 #ifdef TEE_SINK
@@ -310,43 +309,6 @@
     return mTrack->attachAuxEffect(EffectId);
 }
 
-status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
-                                                         sp<IMemory>* buffer) {
-    if (!mTrack->isTimedTrack())
-        return INVALID_OPERATION;
-
-    PlaybackThread::TimedTrack* tt =
-            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
-    return tt->allocateTimedBuffer(size, buffer);
-}
-
-status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
-                                                     int64_t pts) {
-    if (!mTrack->isTimedTrack())
-        return INVALID_OPERATION;
-
-    if (buffer == 0 || buffer->pointer() == NULL) {
-        ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()");
-        return BAD_VALUE;
-    }
-
-    PlaybackThread::TimedTrack* tt =
-            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
-    return tt->queueTimedBuffer(buffer, pts);
-}
-
-status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
-    const LinearTransform& xform, int target) {
-
-    if (!mTrack->isTimedTrack())
-        return INVALID_OPERATION;
-
-    PlaybackThread::TimedTrack* tt =
-            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
-    return tt->setMediaTimeTransform(
-        xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
-}
-
 status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
     return mTrack->setParameters(keyValuePairs);
 }
@@ -381,7 +343,7 @@
             size_t frameCount,
             void *buffer,
             const sp<IMemory>& sharedBuffer,
-            int sessionId,
+            audio_session_t sessionId,
             int uid,
             IAudioFlinger::track_flags_t flags,
             track_type type)
@@ -399,6 +361,8 @@
     mAuxBuffer(NULL),
     mAuxEffectId(0), mHasVolumeController(false),
     mPresentationCompleteFrames(0),
+    mFrameMap(16 /* sink-frame-to-track-frame map memory */),
+    // mSinkTimestamp
     mFastIndex(-1),
     mCachedVolume(1.0),
     mIsInvalid(false),
@@ -409,7 +373,7 @@
     // client == 0 implies sharedBuffer == 0
     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
 
-    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
+    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
             sharedBuffer->size());
 
     if (mCblk == NULL) {
@@ -438,7 +402,7 @@
         //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
         ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
         int i = __builtin_ctz(thread->mFastTrackAvailMask);
-        ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
+        ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);
         // FIXME This is too eager.  We allocate a fast track index before the
         //       fast track becomes active.  Since fast tracks are a scarce resource,
         //       this means we are potentially denying other more important fast tracks from
@@ -490,7 +454,7 @@
             wasActive = playbackThread->destroyTrack_l(this);
         }
         if (isExternalTrack() && !wasActive) {
-            AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, (audio_session_t)mSessionId);
+            AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, mSessionId);
         }
     }
 }
@@ -592,7 +556,7 @@
 
 // AudioBufferProvider interface
 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
-        AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
+        AudioBufferProvider::Buffer* buffer)
 {
     ServerProxy::Buffer buf;
     size_t desiredFrames = buffer->frameCount;
@@ -602,7 +566,10 @@
     buffer->raw = buf.mRaw;
     if (buf.mFrameCount == 0) {
         mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
+    } else {
+        mAudioTrackServerProxy->tallyUnderrunFrames(0);
     }
+
     return status;
 }
 
@@ -623,11 +590,20 @@
     return mAudioTrackServerProxy->framesReady();
 }
 
-size_t AudioFlinger::PlaybackThread::Track::framesReleased() const
+int64_t AudioFlinger::PlaybackThread::Track::framesReleased() const
 {
     return mAudioTrackServerProxy->framesReleased();
 }
 
+void AudioFlinger::PlaybackThread::Track::onTimestamp(const ExtendedTimestamp &timestamp)
+{
+    // This call comes from a FastTrack and should be kept lockless.
+    // The server side frames are already translated to client frames.
+    mAudioTrackServerProxy->setTimestamp(timestamp);
+
+    // We do not set drained here, as FastTrack timestamp may not go to very last frame.
+}
+
 // Don't call for fast tracks; the framesReady() could result in priority inversion
 bool AudioFlinger::PlaybackThread::Track::isReady() const {
     if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
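
onTimestamp() above is called from the FastTrack path and deliberately takes no lock. The proxy's setTimestamp() is not shown here; a generic single-writer publication pattern for this kind of handoff is a sequence counter, sketched below (illustrative only, not the AudioTrackServerProxy mechanism):

    #include <atomic>
    #include <cstdint>

    // Illustrative seqlock-style slot: one writer, any number of readers,
    // no mutex. Sequentially consistent ordering keeps the sketch simple;
    // a tuned implementation would relax it carefully.
    struct TimestampSlot {
        std::atomic<uint32_t> seq{0};
        std::atomic<int64_t>  positionFrames{0};
        std::atomic<int64_t>  timeNs{0};

        void publish(int64_t pos, int64_t ns) {  // single writer only
            seq.fetch_add(1);                    // odd: write in progress
            positionFrames.store(pos);
            timeNs.store(ns);
            seq.fetch_add(1);                    // even: stable again
        }
        bool read(int64_t *pos, int64_t *ns) const {  // lock-free reader
            const uint32_t before = seq.load();
            if (before & 1) return false;        // writer active; retry later
            *pos = positionFrames.load();
            *ns = timeNs.load();
            return before == seq.load();         // unchanged => consistent pair
        }
    };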
@@ -641,7 +617,7 @@
         return true;
     }
 
-    if (framesReady() >= mFrameCount ||
+    if (framesReady() >= mServerProxy->getBufferSizeInFrames() ||
             (mCblk->mFlags & CBLK_FORCEREADY)) {
         mFillingUpStatus = FS_FILLED;
         android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
@@ -651,7 +627,7 @@
 }
 
 status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
-                                                    int triggerSession __unused)
+                                                    audio_session_t triggerSession __unused)
 {
     status_t status = NO_ERROR;
     ALOGV("start(%d), calling pid %d session %d",
@@ -691,6 +667,11 @@
             ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
         }
 
+        // reset the position info when a non-offloaded/direct track restarts from these states
+        if (!isOffloaded() && !isDirect()
+                && (state == IDLE || state == STOPPED || state == FLUSHED)) {
+            mFrameMap.reset();
+        }
         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
         if (isFastTrack()) {
             // refresh fast track underruns on start because that field is never cleared
@@ -747,6 +728,9 @@
                 // For an offloaded track this starts a drain and state will
                 // move to STOPPING_2 when drain completes and then STOPPED
                 mState = STOPPING_1;
+                if (isOffloaded()) {
+                    mRetryCount = PlaybackThread::kMaxTrackStopRetriesOffload;
+                }
             }
             playbackThread->broadcast_l();
             ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
@@ -811,11 +795,6 @@
                 mState = ACTIVE;
             }
 
-            if (mState == ACTIVE) {
-                ALOGV("flush called in active state, resetting buffer time out retry count");
-                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
-            }
-
             mFlushHwPending = true;
             mResumeToStopping = false;
         } else {
@@ -885,9 +864,8 @@
 
 status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
 {
-    // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
-    if (isFastTrack()) {
-        return INVALID_OPERATION;
+    if (!isOffloaded() && !isDirect()) {
+        return INVALID_OPERATION; // normal tracks handled through SSQ
     }
     sp<ThreadBase> thread = mThread.promote();
     if (thread == 0) {
@@ -896,38 +874,7 @@
 
     Mutex::Autolock _l(thread->mLock);
     PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-
-    status_t result = INVALID_OPERATION;
-    if (!isOffloaded() && !isDirect()) {
-        if (!playbackThread->mLatchQValid) {
-            return INVALID_OPERATION;
-        }
-        // FIXME Not accurate under dynamic changes of sample rate and speed.
-        // Do not use track's mSampleRate as it is not current for mixer tracks.
-        uint32_t sampleRate = mAudioTrackServerProxy->getSampleRate();
-        AudioPlaybackRate playbackRate = mAudioTrackServerProxy->getPlaybackRate();
-        uint32_t unpresentedFrames = ((double) playbackThread->mLatchQ.mUnpresentedFrames *
-                sampleRate * playbackRate.mSpeed)/ playbackThread->mSampleRate;
-        // FIXME Since we're using a raw pointer as the key, it is theoretically possible
-        //       for a brand new track to share the same address as a recently destroyed
-        //       track, and thus for us to get the frames released of the wrong track.
-        //       It is unlikely that we would be able to call getTimestamp() so quickly
-        //       right after creating a new track.  Nevertheless, the index here should
-        //       be changed to something that is unique.  Or use a completely different strategy.
-        ssize_t i = playbackThread->mLatchQ.mFramesReleased.indexOfKey(this);
-        uint32_t framesWritten = i >= 0 ?
-                playbackThread->mLatchQ.mFramesReleased[i] :
-                mAudioTrackServerProxy->framesReleased();
-        if (framesWritten >= unpresentedFrames) {
-            timestamp.mPosition = framesWritten - unpresentedFrames;
-            timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
-            result = NO_ERROR;
-        }
-    } else { // offloaded or direct
-        result = playbackThread->getTimestamp_l(timestamp);
-    }
-
-    return result;
+    return playbackThread->getTimestamp_l(timestamp);
 }
 
 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
@@ -990,9 +937,12 @@
     mAuxBuffer = buffer;
 }
 
-bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
-                                                         size_t audioHalFrames)
+bool AudioFlinger::PlaybackThread::Track::presentationComplete(
+        int64_t framesWritten, size_t audioHalFrames)
 {
+    // TODO: improve this based on FrameMap if it exists, to ensure full drain.
+    // This assists in proper timestamp computation as well as wakelock management.
+
     // a track is considered presented when the total number of frames written to audio HAL
     // corresponds to the number of frames written when presentationComplete() is called for the
     // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
@@ -1000,15 +950,25 @@
     // to detect when all frames have been played. In this case framesWritten isn't
     // useful because it doesn't always reflect whether there is data in the h/w
     // buffers, particularly if a track has been paused and resumed during draining
-    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
-                      mPresentationCompleteFrames, framesWritten);
+    ALOGV("presentationComplete() mPresentationCompleteFrames %lld framesWritten %lld",
+            (long long)mPresentationCompleteFrames, (long long)framesWritten);
     if (mPresentationCompleteFrames == 0) {
         mPresentationCompleteFrames = framesWritten + audioHalFrames;
-        ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
-                  mPresentationCompleteFrames, audioHalFrames);
+        ALOGV("presentationComplete() reset: mPresentationCompleteFrames %lld audioHalFrames %zu",
+                (long long)mPresentationCompleteFrames, audioHalFrames);
     }
 
-    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
+    bool complete;
+    if (isOffloaded()) {
+        complete = true;
+    } else if (isDirect() || isFastTrack()) { // these do not go through linear map
+        complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
+    } else {  // Normal tracks, OutputTracks, and PatchTracks
+        complete = framesWritten >= (int64_t) mPresentationCompleteFrames
+                && mAudioTrackServerProxy->isDrained();
+    }
+
+    if (complete) {
         triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
         mAudioTrackServerProxy->setStreamEndDone();
         return true;
@@ -1059,7 +1019,7 @@
     if (isTerminated() || mState == PAUSED ||
             ((framesReady() == 0) && ((mSharedBuffer != 0) ||
                                       (mState == STOPPED)))) {
-        ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
+        ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %zu",
               mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
         event->cancel();
         return INVALID_OPERATION;
@@ -1070,13 +1030,23 @@
 
 void AudioFlinger::PlaybackThread::Track::invalidate()
 {
+    signalClientFlag(CBLK_INVALID);
+    mIsInvalid = true;
+}
+
+void AudioFlinger::PlaybackThread::Track::disable()
+{
+    signalClientFlag(CBLK_DISABLED);
+}
+
+void AudioFlinger::PlaybackThread::Track::signalClientFlag(int32_t flag)
+{
     // FIXME should use proxy, and needs work
     audio_track_cblk_t* cblk = mCblk;
-    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
+    android_atomic_or(flag, &cblk->mFlags);
     android_atomic_release_store(0x40000000, &cblk->mFutex);
     // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
     (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
-    mIsInvalid = true;
 }
 
 void AudioFlinger::PlaybackThread::Track::signal()
@@ -1116,526 +1086,41 @@
         mResumeToStopping = false;
     }
 }
-// ----------------------------------------------------------------------------
 
-sp<AudioFlinger::PlaybackThread::TimedTrack>
-AudioFlinger::PlaybackThread::TimedTrack::create(
-            PlaybackThread *thread,
-            const sp<Client>& client,
-            audio_stream_type_t streamType,
-            uint32_t sampleRate,
-            audio_format_t format,
-            audio_channel_mask_t channelMask,
-            size_t frameCount,
-            const sp<IMemory>& sharedBuffer,
-            int sessionId,
-            int uid)
-{
-    if (!client->reserveTimedTrack())
-        return 0;
+// To be called with the thread lock held
+void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
+        int64_t trackFramesReleased, int64_t sinkFramesWritten,
+        const ExtendedTimestamp &timeStamp) {
+    // update the frame map
+    mFrameMap.push(trackFramesReleased, sinkFramesWritten);
 
-    return new TimedTrack(
-        thread, client, streamType, sampleRate, format, channelMask, frameCount,
-        sharedBuffer, sessionId, uid);
-}
-
-AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
-            PlaybackThread *thread,
-            const sp<Client>& client,
-            audio_stream_type_t streamType,
-            uint32_t sampleRate,
-            audio_format_t format,
-            audio_channel_mask_t channelMask,
-            size_t frameCount,
-            const sp<IMemory>& sharedBuffer,
-            int sessionId,
-            int uid)
-    : Track(thread, client, streamType, sampleRate, format, channelMask,
-            frameCount, (sharedBuffer != 0) ? sharedBuffer->pointer() : NULL, sharedBuffer,
-                    sessionId, uid, IAudioFlinger::TRACK_TIMED, TYPE_TIMED),
-      mQueueHeadInFlight(false),
-      mTrimQueueHeadOnRelease(false),
-      mFramesPendingInQueue(0),
-      mTimedSilenceBuffer(NULL),
-      mTimedSilenceBufferSize(0),
-      mTimedAudioOutputOnTime(false),
-      mMediaTimeTransformValid(false)
-{
-    LocalClock lc;
-    mLocalTimeFreq = lc.getLocalFreq();
-
-    mLocalTimeToSampleTransform.a_zero = 0;
-    mLocalTimeToSampleTransform.b_zero = 0;
-    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
-    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
-    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
-                            &mLocalTimeToSampleTransform.a_to_b_denom);
-
-    mMediaTimeToSampleTransform.a_zero = 0;
-    mMediaTimeToSampleTransform.b_zero = 0;
-    mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
-    mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
-    LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
-                            &mMediaTimeToSampleTransform.a_to_b_denom);
-}
-
-AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
-    mClient->releaseTimedTrack();
-    delete [] mTimedSilenceBuffer;
-}
-
-status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
-    size_t size, sp<IMemory>* buffer) {
-
-    Mutex::Autolock _l(mTimedBufferQueueLock);
-
-    trimTimedBufferQueue_l();
-
-    // lazily initialize the shared memory heap for timed buffers
-    if (mTimedMemoryDealer == NULL) {
-        const int kTimedBufferHeapSize = 512 << 10;
-
-        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
-                                              "AudioFlingerTimed");
-        if (mTimedMemoryDealer == NULL) {
-            return NO_MEMORY;
-        }
-    }
-
-    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
-    if (newBuffer == 0 || newBuffer->pointer() == NULL) {
-        return NO_MEMORY;
-    }
-
-    *buffer = newBuffer;
-    return NO_ERROR;
-}
-
-// caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
-    int64_t mediaTimeNow;
-    {
-        Mutex::Autolock mttLock(mMediaTimeTransformLock);
-        if (!mMediaTimeTransformValid)
-            return;
-
-        int64_t targetTimeNow;
-        status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
-            ? mCCHelper.getCommonTime(&targetTimeNow)
-            : mCCHelper.getLocalTime(&targetTimeNow);
-
-        if (OK != res)
-            return;
-
-        if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
-                                                    &mediaTimeNow)) {
-            return;
-        }
-    }
-
-    size_t trimEnd;
-    for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
-        int64_t bufEnd;
-
-        if ((trimEnd + 1) < mTimedBufferQueue.size()) {
-            // We have a next buffer.  Just use its PTS as the PTS of the frame
-            // following the last frame in this buffer.  If the stream is sparse
-            // (ie, there are deliberate gaps left in the stream which should be
-            // filled with silence by the TimedAudioTrack), then this can result
-            // in one extra buffer being left un-trimmed when it could have
-            // been.  In general, this is not typical, and we would rather
-            // optimized away the TS calculation below for the more common case
-            // where PTSes are contiguous.
-            bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
-        } else {
-            // We have no next buffer.  Compute the PTS of the frame following
-            // the last frame in this buffer by computing the duration of of
-            // this frame in media time units and adding it to the PTS of the
-            // buffer.
-            int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
-                               / mFrameSize;
-
-            if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
-                                                                &bufEnd)) {
-                ALOGE("Failed to convert frame count of %lld to media time"
-                      " duration" " (scale factor %d/%u) in %s",
-                      frameCount,
-                      mMediaTimeToSampleTransform.a_to_b_numer,
-                      mMediaTimeToSampleTransform.a_to_b_denom,
-                      __PRETTY_FUNCTION__);
-                break;
-            }
-            bufEnd += mTimedBufferQueue[trimEnd].pts();
-        }
-
-        if (bufEnd > mediaTimeNow)
-            break;
-
-        // Is the buffer we want to use in the middle of a mix operation right
-        // now?  If so, don't actually trim it.  Just wait for the releaseBuffer
-        // from the mixer which should be coming back shortly.
-        if (!trimEnd && mQueueHeadInFlight) {
-            mTrimQueueHeadOnRelease = true;
-        }
-    }
-
-    size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
-    if (trimStart < trimEnd) {
-        // Update the bookkeeping for framesReady()
-        for (size_t i = trimStart; i < trimEnd; ++i) {
-            updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
-        }
-
-        // Now actually remove the buffers from the queue.
-        mTimedBufferQueue.removeItemsAt(trimStart, trimEnd);
-    }
-}
-
-void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
-        const char* logTag) {
-    ALOG_ASSERT(mTimedBufferQueue.size() > 0,
-                "%s called (reason \"%s\"), but timed buffer queue has no"
-                " elements to trim.", __FUNCTION__, logTag);
-
-    updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
-    mTimedBufferQueue.removeAt(0);
-}
-
-void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
-        const TimedBuffer& buf,
-        const char* logTag __unused) {
-    uint32_t bufBytes        = buf.buffer()->size();
-    uint32_t consumedAlready = buf.position();
-
-    ALOG_ASSERT(consumedAlready <= bufBytes,
-                "Bad bookkeeping while updating frames pending.  Timed buffer is"
-                " only %u bytes long, but claims to have consumed %u"
-                " bytes.  (update reason: \"%s\")",
-                bufBytes, consumedAlready, logTag);
-
-    uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
-    ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
-                "Bad bookkeeping while updating frames pending.  Should have at"
-                " least %u queued frames, but we think we have only %u.  (update"
-                " reason: \"%s\")",
-                bufFrames, mFramesPendingInQueue, logTag);
-
-    mFramesPendingInQueue -= bufFrames;
-}
-
-status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
-    const sp<IMemory>& buffer, int64_t pts) {
-
-    {
-        Mutex::Autolock mttLock(mMediaTimeTransformLock);
-        if (!mMediaTimeTransformValid)
-            return INVALID_OPERATION;
-    }
-
-    Mutex::Autolock _l(mTimedBufferQueueLock);
-
-    uint32_t bufFrames = buffer->size() / mFrameSize;
-    mFramesPendingInQueue += bufFrames;
-    mTimedBufferQueue.add(TimedBuffer(buffer, pts));
-
-    return NO_ERROR;
-}
-
-status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
-    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
-
-    ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
-           xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
-           target);
-
-    if (!(target == TimedAudioTrack::LOCAL_TIME ||
-          target == TimedAudioTrack::COMMON_TIME)) {
-        return BAD_VALUE;
-    }
-
-    Mutex::Autolock lock(mMediaTimeTransformLock);
-    mMediaTimeTransform = xform;
-    mMediaTimeTransformTarget = target;
-    mMediaTimeTransformValid = true;
-
-    return NO_ERROR;
-}
-
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
-// implementation of getNextBuffer for tracks whose buffers have timestamps
-status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
-    AudioBufferProvider::Buffer* buffer, int64_t pts)
-{
-    if (pts == AudioBufferProvider::kInvalidPTS) {
-        buffer->raw = NULL;
-        buffer->frameCount = 0;
-        mTimedAudioOutputOnTime = false;
-        return INVALID_OPERATION;
-    }
-
-    Mutex::Autolock _l(mTimedBufferQueueLock);
-
-    ALOG_ASSERT(!mQueueHeadInFlight,
-                "getNextBuffer called without releaseBuffer!");
-
-    while (true) {
-
-        // if we have no timed buffers, then fail
-        if (mTimedBufferQueue.isEmpty()) {
-            buffer->raw = NULL;
-            buffer->frameCount = 0;
-            return NOT_ENOUGH_DATA;
-        }
-
-        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
-
-        // calculate the PTS of the head of the timed buffer queue expressed in
-        // local time
-        int64_t headLocalPTS;
-        {
-            Mutex::Autolock mttLock(mMediaTimeTransformLock);
-
-            ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
-
-            if (mMediaTimeTransform.a_to_b_denom == 0) {
-                // the transform represents a pause, so yield silence
-                timedYieldSilence_l(buffer->frameCount, buffer);
-                return NO_ERROR;
-            }
-
-            int64_t transformedPTS;
-            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
-                                                        &transformedPTS)) {
-                // the transform failed.  this shouldn't happen, but if it does
-                // then just drop this buffer
-                ALOGW("timedGetNextBuffer transform failed");
-                buffer->raw = NULL;
-                buffer->frameCount = 0;
-                trimTimedBufferQueueHead_l("getNextBuffer; no transform");
-                return NO_ERROR;
-            }
-
-            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
-                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
-                                                          &headLocalPTS)) {
-                    buffer->raw = NULL;
-                    buffer->frameCount = 0;
-                    return INVALID_OPERATION;
-                }
-            } else {
-                headLocalPTS = transformedPTS;
-            }
-        }
-
-        uint32_t sr = sampleRate();
-
-        // adjust the head buffer's PTS to reflect the portion of the head buffer
-        // that has already been consumed
-        int64_t effectivePTS = headLocalPTS +
-                ((head.position() / mFrameSize) * mLocalTimeFreq / sr);
-
-        // Calculate the delta in samples between the head of the input buffer
-        // queue and the start of the next output buffer that will be written.
-        // If the transformation fails because of over or underflow, it means
-        // that the sample's position in the output stream is so far out of
-        // whack that it should just be dropped.
-        int64_t sampleDelta;
-        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
-            ALOGV("*** head buffer is too far from PTS: dropped buffer");
-            trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
-                                       " mix");
-            continue;
-        }
-        if (!mLocalTimeToSampleTransform.doForwardTransform(
-                (effectivePTS - pts) << 32, &sampleDelta)) {
-            ALOGV("*** too late during sample rate transform: dropped buffer");
-            trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
-            continue;
-        }
-
-        ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
-               " sampleDelta=[%d.%08x]",
-               head.pts(), head.position(), pts,
-               static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
-                   + (sampleDelta >> 32)),
-               static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
-
-        // if the delta between the ideal placement for the next input sample and
-        // the current output position is within this threshold, then we will
-        // concatenate the next input samples to the previous output
-        const int64_t kSampleContinuityThreshold =
-                (static_cast<int64_t>(sr) << 32) / 250;
-
-        // if this is the first buffer of audio that we're emitting from this track
-        // then it should be almost exactly on time.
-        const int64_t kSampleStartupThreshold = 1LL << 32;
-
-        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
-           (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
-            // the next input is close enough to being on time, so concatenate it
-            // with the last output
-            timedYieldSamples_l(buffer);
-
-            ALOGVV("*** on time: head.pos=%d frameCount=%u",
-                    head.position(), buffer->frameCount);
-            return NO_ERROR;
-        }
-
-        // Looks like our output is not on time.  Reset our on-time status.
-        // The next time we mix samples from our input queue, they should be
-        // within the StartupThreshold.
-        mTimedAudioOutputOnTime = false;
-        if (sampleDelta > 0) {
-            // the gap between the current output position and the proper start of
-            // the next input sample is too big, so fill it with silence
-            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
-
-            timedYieldSilence_l(framesUntilNextInput, buffer);
-            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
-            return NO_ERROR;
-        } else {
-            // the next input sample is late
-            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
-            size_t onTimeSamplePosition =
-                    head.position() + lateFrames * mFrameSize;
-
-            if (onTimeSamplePosition > head.buffer()->size()) {
-                // all the remaining samples in the head are too late, so
-                // drop it and move on
-                ALOGV("*** too late: dropped buffer");
-                trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
-                continue;
-            } else {
-                // skip over the late samples
-                head.setPosition(onTimeSamplePosition);
-
-                // yield the available samples
-                timedYieldSamples_l(buffer);
-
-                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
-                return NO_ERROR;
+    // adjust server times and set drained state.
+    //
+    // Our timestamps are only updated when the track is on the Thread active list.
+    // We need to ensure that tracks are not removed before full drain.
+    ExtendedTimestamp local = timeStamp;
+    bool checked = false;
+    for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
+            i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
+        // Lookup the track frame corresponding to the sink frame position.
+        if (local.mTimeNs[i] > 0) {
+            local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
+            // check drain state from the latest stage in the pipeline.
+            if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
+                mAudioTrackServerProxy->setDrained(
+                        local.mPosition[i] >= mAudioTrackServerProxy->framesReleased());
+                checked = true;
             }
         }
     }
-}
-
-// Yield samples from the timed buffer queue head up to the given output
-// buffer's capacity.
-//
-// Caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
-    AudioBufferProvider::Buffer* buffer) {
-
-    const TimedBuffer& head = mTimedBufferQueue[0];
-
-    buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
-                   head.position());
-
-    uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
-                                 mFrameSize);
-    size_t framesRequested = buffer->frameCount;
-    buffer->frameCount = min(framesLeftInHead, framesRequested);
-
-    mQueueHeadInFlight = true;
-    mTimedAudioOutputOnTime = true;
-}
-
-// Yield samples of silence up to the given output buffer's capacity
-//
-// Caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
-    uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
-
-    // lazily allocate a buffer filled with silence
-    if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
-        delete [] mTimedSilenceBuffer;
-        mTimedSilenceBufferSize = numFrames * mFrameSize;
-        mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
-        memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
+    if (!checked) { // no server info, assume drained.
+        mAudioTrackServerProxy->setDrained(true);
     }
-
-    buffer->raw = mTimedSilenceBuffer;
-    size_t framesRequested = buffer->frameCount;
-    buffer->frameCount = min(numFrames, framesRequested);
-
-    mTimedAudioOutputOnTime = false;
+    // Set correction for flushed frames that are not accounted for in released.
+    local.mFlushed = mAudioTrackServerProxy->framesFlushed();
+    mServerProxy->setTimestamp(local);
 }
 
-// AudioBufferProvider interface
-void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
-    AudioBufferProvider::Buffer* buffer) {
-
-    Mutex::Autolock _l(mTimedBufferQueueLock);
-
-    // If the buffer which was just released is part of the buffer at the head
-    // of the queue, be sure to update the amount of the buffer which has been
-    // consumed.  If the buffer being returned is not part of the head of the
-    // queue, it's either because the buffer is part of the silence buffer, or
-    // because the head of the timed queue was trimmed after the mixer called
-    // getNextBuffer but before the mixer called releaseBuffer.
-    if (buffer->raw == mTimedSilenceBuffer) {
-        ALOG_ASSERT(!mQueueHeadInFlight,
-                    "Queue head in flight during release of silence buffer!");
-        goto done;
-    }
-
-    ALOG_ASSERT(mQueueHeadInFlight,
-                "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
-                " head in flight.");
-
-    if (mTimedBufferQueue.size()) {
-        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
-
-        void* start = head.buffer()->pointer();
-        void* end   = reinterpret_cast<void*>(
-                        reinterpret_cast<uint8_t*>(head.buffer()->pointer())
-                        + head.buffer()->size());
-
-        ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
-                    "released buffer not within the head of the timed buffer"
-                    " queue; qHead = [%p, %p], released buffer = %p",
-                    start, end, buffer->raw);
-
-        head.setPosition(head.position() +
-                (buffer->frameCount * mFrameSize));
-        mQueueHeadInFlight = false;
-
-        ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
-                    "Bad bookkeeping during releaseBuffer!  Should have at"
-                    " least %u queued frames, but we think we have only %u",
-                    buffer->frameCount, mFramesPendingInQueue);
-
-        mFramesPendingInQueue -= buffer->frameCount;
-
-        if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
-            || mTrimQueueHeadOnRelease) {
-            trimTimedBufferQueueHead_l("releaseBuffer");
-            mTrimQueueHeadOnRelease = false;
-        }
-    } else {
-        LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
-                  " buffers in the timed buffer queue");
-    }
-
-done:
-    buffer->raw = 0;
-    buffer->frameCount = 0;
-}
-
-size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
-    Mutex::Autolock _l(mTimedBufferQueueLock);
-    return mFramesPendingInQueue;
-}
-
-AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
-        : mPTS(0), mPosition(0) {}
-
-AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
-    const sp<IMemory>& buffer, int64_t pts)
-        : mBuffer(buffer), mPTS(pts), mPosition(0) {}
-
-
 // ----------------------------------------------------------------------------
 
 AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
@@ -1648,7 +1133,8 @@
             int uid)
     :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
               sampleRate, format, channelMask, frameCount,
-              NULL, 0, 0, uid, IAudioFlinger::TRACK_DEFAULT, TYPE_OUTPUT),
+              NULL, 0, AUDIO_SESSION_NONE, uid, IAudioFlinger::TRACK_DEFAULT,
+              TYPE_OUTPUT),
     mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
 {
 
@@ -1656,7 +1142,7 @@
         mOutBuffer.frameCount = 0;
         playbackThread->mTracks.add(this);
         ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
-                "frameCount %u, mChannelMask 0x%08x",
+                "frameCount %zu, mChannelMask 0x%08x",
                 mCblk, mBuffer,
                 frameCount, mChannelMask);
         // since client and server are in the same process,
@@ -1679,7 +1165,7 @@
 }
 
 status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
-                                                          int triggerSession)
+                                                          audio_session_t triggerSession)
 {
     status_t status = Track::start(event, triggerSession);
     if (status != NO_ERROR) {
@@ -1729,7 +1215,7 @@
             mOutBuffer.frameCount = pInBuffer->frameCount;
             nsecs_t startTime = systemTime();
             status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
-            if (status != NO_ERROR) {
+            if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
                 ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
                         mThread.unsafe_get(), status);
                 outputBufferFull = true;
@@ -1741,6 +1227,10 @@
             } else {
                 waitTimeLeftMs = 0;
             }
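+            // NOT_ENOUGH_DATA means the proxy track was disabled (e.g. by an
+            // underrun): re-enable it if still active and retry the write.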
+            if (status == NOT_ENOUGH_DATA) {
+                restartIfDisabled();
+                continue;
+            }
         }
 
         uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
@@ -1750,6 +1240,7 @@
         buf.mFrameCount = outFrames;
         buf.mRaw = NULL;
         mClientProxy->releaseBuffer(&buf);
+        restartIfDisabled();
         pInBuffer->frameCount -= outFrames;
         pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
         mOutBuffer.frameCount -= outFrames;
@@ -1760,7 +1251,7 @@
                 mBufferQueue.removeAt(0);
                 free(pInBuffer->mBuffer);
                 delete pInBuffer;
-                ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
+                ALOGV("OutputTrack::write() %p thread %p released overflow buffer %zu", this,
                         mThread.unsafe_get(), mBufferQueue.size());
             } else {
                 break;
@@ -1779,7 +1270,7 @@
                 pInBuffer->raw = pInBuffer->mBuffer;
                 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
                 mBufferQueue.add(pInBuffer);
-                ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
+                ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %zu", this,
                         mThread.unsafe_get(), mBufferQueue.size());
             } else {
                 ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
@@ -1823,6 +1314,13 @@
     mBufferQueue.clear();
 }
 
+void AudioFlinger::PlaybackThread::OutputTrack::restartIfDisabled()
+{
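+    // Atomically clear the disabled flag; if it was set by an underrun while
+    // the track is still logically active, restart the track.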
+    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+    if (mActive && (flags & CBLK_DISABLED)) {
+        start();
+    }
+}
 
 AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
                                                      audio_stream_type_t streamType,
@@ -1834,7 +1332,7 @@
                                                      IAudioFlinger::track_flags_t flags)
     :   Track(playbackThread, NULL, streamType,
               sampleRate, format, channelMask, frameCount,
-              buffer, 0, 0, getuid(), flags, TYPE_PATCH),
+              buffer, 0, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
               mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
 {
     uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
@@ -1852,9 +1350,20 @@
 {
 }
 
+status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
+                                                          audio_session_t triggerSession)
+{
+    status_t status = Track::start(event, triggerSession);
+    if (status != NO_ERROR) {
+        return status;
+    }
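+    // Clear any underrun-induced disabled state so the restarted track is mixed.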
+    android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+    return status;
+}
+
 // AudioBufferProvider interface
 status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
-        AudioBufferProvider::Buffer* buffer, int64_t pts)
+        AudioBufferProvider::Buffer* buffer)
 {
     ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::getNextBuffer() called without peer proxy");
     Proxy::Buffer buf;
@@ -1865,7 +1374,7 @@
     if (buf.mFrameCount == 0) {
         return WOULD_BLOCK;
     }
-    status = Track::getNextBuffer(buffer, pts);
+    status = Track::getNextBuffer(buffer);
     return status;
 }
 
@@ -1882,17 +1391,31 @@
 status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
                                                                 const struct timespec *timeOut)
 {
-    return mProxy->obtainBuffer(buffer, timeOut);
+    status_t status = NO_ERROR;
+    static const int32_t kMaxTries = 5;
+    int32_t tryCounter = kMaxTries;
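+    // Retry a bounded number of times if the track was disabled by an
+    // underrun, re-enabling it before each retry.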
+    do {
+        if (status == NOT_ENOUGH_DATA) {
+            restartIfDisabled();
+        }
+        status = mProxy->obtainBuffer(buffer, timeOut);
+    } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
+    return status;
 }
 
 void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
 {
     mProxy->releaseBuffer(buffer);
+    restartIfDisabled();
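+    // Mark the track as ready so the mixer does not wait for a fill threshold.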
+    android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
+}
+
+void AudioFlinger::PlaybackThread::PatchTrack::restartIfDisabled()
+{
     if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
         ALOGW("PatchTrack::releaseBuffer() disabled due to previous underrun, restarting");
         start();
     }
-    android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
 }
 
 // ----------------------------------------------------------------------------
@@ -1912,7 +1435,7 @@
 }
 
 status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
-        int triggerSession) {
+        audio_session_t triggerSession) {
     ALOGV("RecordHandle::start()");
     return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
 }
@@ -1943,7 +1466,7 @@
             audio_channel_mask_t channelMask,
             size_t frameCount,
             void *buffer,
-            int sessionId,
+            audio_session_t sessionId,
             int uid,
             IAudioFlinger::track_flags_t flags,
             track_type type)
@@ -1978,7 +1501,8 @@
     }
 
     mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
-                                              mFrameSize, !isExternalTrack());
+            mFrameSize, !isExternalTrack());
+
     mResamplerBufferProvider = new ResamplerBufferProvider(this);
 
     if (flags & IAudioFlinger::TRACK_FAST) {
@@ -2004,8 +1528,7 @@
 }
 
 // AudioBufferProvider interface
-status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
-        int64_t pts __unused)
+status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
 {
     ServerProxy::Buffer buf;
     buf.mFrameCount = buffer->frameCount;
@@ -2020,7 +1543,7 @@
 }
 
 status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
-                                                        int triggerSession)
+                                                        audio_session_t triggerSession)
 {
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
@@ -2037,7 +1560,7 @@
     if (thread != 0) {
         RecordThread *recordThread = (RecordThread *)thread.get();
         if (recordThread->stop(this) && isExternalTrack()) {
-            AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
+            AudioSystem::stopInput(mThreadIoHandle, mSessionId);
         }
     }
 }
@@ -2049,9 +1572,9 @@
     {
         if (isExternalTrack()) {
             if (mState == ACTIVE || mState == RESUMING) {
-                AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
+                AudioSystem::stopInput(mThreadIoHandle, mSessionId);
             }
-            AudioSystem::releaseInput(mThreadIoHandle, (audio_session_t)mSessionId);
+            AudioSystem::releaseInput(mThreadIoHandle, mSessionId);
         }
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
@@ -2116,6 +1639,24 @@
     mFramesToDrop = 0;
 }
 
+void AudioFlinger::RecordThread::RecordTrack::updateTrackFrameInfo(
+        int64_t trackFramesReleased, int64_t sourceFramesRead,
+        uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
+{
+    ExtendedTimestamp local = timestamp;
+
+    // Convert HAL frames to server-side track frames at track sample rate.
+    // We use trackFramesReleased and sourceFramesRead as an anchor point.
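+    // At the anchor, the HAL-side position equals sourceFramesRead and the
+    // track-side position equals trackFramesReleased; positions are scaled
+    // between the two rates by mSampleRate / halSampleRate.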
+    for (int i = ExtendedTimestamp::LOCATION_SERVER; i < ExtendedTimestamp::LOCATION_MAX; ++i) {
+        if (local.mTimeNs[i] != 0) {
+            const int64_t relativeServerFrames = local.mPosition[i] - sourceFramesRead;
+            const int64_t relativeTrackFrames = relativeServerFrames
+                    * mSampleRate / halSampleRate; // TODO: potential computation overflow
+            local.mPosition[i] = relativeTrackFrames + trackFramesReleased;
+        }
+    }
+    mServerProxy->setTimestamp(local);
+}
 
 AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
                                                      uint32_t sampleRate,
@@ -2125,7 +1666,7 @@
                                                      void *buffer,
                                                      IAudioFlinger::track_flags_t flags)
     :   RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
-                buffer, 0, getuid(), flags, TYPE_PATCH),
+                buffer, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
                 mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
 {
     uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
@@ -2145,7 +1686,7 @@
 
 // AudioBufferProvider interface
 status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
-                                                  AudioBufferProvider::Buffer* buffer, int64_t pts)
+                                                  AudioBufferProvider::Buffer* buffer)
 {
     ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::getNextBuffer() called without peer proxy");
     Proxy::Buffer buf;
@@ -2157,7 +1698,7 @@
     if (buf.mFrameCount == 0) {
         return WOULD_BLOCK;
     }
-    status = RecordTrack::getNextBuffer(buffer, pts);
+    status = RecordTrack::getNextBuffer(buffer);
     return status;
 }
 
diff --git a/services/audioflinger/audio-resampler/Android.mk b/services/audioflinger/audio-resampler/Android.mk
index ba37b19..bb2807c 100644
--- a/services/audioflinger/audio-resampler/Android.mk
+++ b/services/audioflinger/audio-resampler/Android.mk
@@ -11,4 +11,6 @@
 
 LOCAL_SHARED_LIBRARIES  := libutils liblog
 
+LOCAL_CFLAGS += -Werror -Wall
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audioflinger/test-resample.cpp b/services/audioflinger/test-resample.cpp
index 7893778..bae3c5b 100644
--- a/services/audioflinger/test-resample.cpp
+++ b/services/audioflinger/test-resample.cpp
@@ -272,9 +272,7 @@
             mFrameSize(frameSize),
             mNextFrame(0), mUnrel(0), mPvalues(Pvalues), mNextPidx(0) {
         }
-        virtual status_t getNextBuffer(Buffer* buffer,
-                int64_t pts = kInvalidPTS) {
-            (void)pts; // suppress warning
+        virtual status_t getNextBuffer(Buffer* buffer) {
             size_t requestedFrames = buffer->frameCount;
             if (requestedFrames > mNumFrames - mNextFrame) {
                 buffer->frameCount = mNumFrames - mNextFrame;
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
index e152468..3505e0f 100644
--- a/services/audioflinger/tests/Android.mk
+++ b/services/audioflinger/tests/Android.mk
@@ -23,6 +23,8 @@
 LOCAL_MODULE := resampler_tests
 LOCAL_MODULE_TAGS := tests
 
+LOCAL_CFLAGS := -Werror -Wall
+
 include $(BUILD_NATIVE_TEST)
 
 #
@@ -47,7 +49,6 @@
 LOCAL_SHARED_LIBRARIES := \
 	libeffects \
 	libnbaio \
-	libcommon_time_client \
 	libaudioresampler \
 	libaudioutils \
 	libdl \
@@ -62,4 +63,6 @@
 
 LOCAL_CXX_STL := libc++
 
+LOCAL_CFLAGS := -Werror -Wall
+
 include $(BUILD_EXECUTABLE)
diff --git a/services/audioflinger/tests/README b/services/audioflinger/tests/README
new file mode 100644
index 0000000..508e960
--- /dev/null
+++ b/services/audioflinger/tests/README
@@ -0,0 +1,13 @@
+To build the libsonic dependency:
+pushd external/sonic
+mm
+popd
+
+To build the resampler library:
+pushd ..
+(Optionally uncomment USE_NEON=false in Android.mk first.)
+mm
+popd
+
+Then build the tests here:
+mm
diff --git a/services/audioflinger/tests/run_all_unit_tests.sh b/services/audioflinger/tests/run_all_unit_tests.sh
index ffae6ae..113f39e 100755
--- a/services/audioflinger/tests/run_all_unit_tests.sh
+++ b/services/audioflinger/tests/run_all_unit_tests.sh
@@ -8,4 +8,5 @@
 echo "waiting for device"
 adb root && adb wait-for-device remount
 
-adb shell /system/bin/resampler_tests
+#adb shell /system/bin/resampler_tests
+adb shell /data/nativetest/resampler_tests/resampler_tests
diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp
index 8da6245..65e22da 100644
--- a/services/audioflinger/tests/test-mixer.cpp
+++ b/services/audioflinger/tests/test-mixer.cpp
@@ -307,7 +307,7 @@
                         (char *) auxAddr + i * auxFrameSize);
             }
         }
-        mixer->process(AudioBufferProvider::kInvalidPTS);
+        mixer->process();
     }
     outputFrames = i; // reset output frames to the data actually produced.
 
diff --git a/services/audioflinger/tests/test_utils.h b/services/audioflinger/tests/test_utils.h
index 3d51cdc..283c768 100644
--- a/services/audioflinger/tests/test_utils.h
+++ b/services/audioflinger/tests/test_utils.h
@@ -112,7 +112,7 @@
         mNextIdx = 0;
     }
 
-    virtual android::status_t getNextBuffer(Buffer* buffer, int64_t pts __unused = kInvalidPTS)
+    virtual android::status_t getNextBuffer(Buffer* buffer)
     {
         size_t requestedFrames = buffer->frameCount;
         if (requestedFrames > mNumFrames - mNextFrame) {
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 5b38e1c..8b45adc 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -24,6 +24,7 @@
     $(call include-path-for, audio-utils) \
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
     $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
+    $(TOPDIR)frameworks/av/services/audiopolicy/utilities
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
@@ -44,19 +45,19 @@
     libmedia_helper \
     libaudiopolicycomponents
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
 LOCAL_MODULE:= libaudiopolicyservice
 
 LOCAL_CFLAGS += -fvisibility=hidden
 
 include $(BUILD_SHARED_LIBRARY)
 
-
 ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
 
 include $(CLEAR_VARS)
 
-LOCAL_SRC_FILES:= \
-    managerdefault/AudioPolicyManager.cpp \
+LOCAL_SRC_FILES:= managerdefault/AudioPolicyManager.cpp
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
@@ -66,12 +67,15 @@
 
 ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
 
+ifneq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+$(error Configurable policy does not support legacy conf file)
+endif #ifneq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+
 LOCAL_REQUIRED_MODULES := \
     parameter-framework.policy \
     audio_policy_criteria.conf \
 
-LOCAL_C_INCLUDES += \
-    $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/include \
+LOCAL_C_INCLUDES += $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/include
 
 LOCAL_SHARED_LIBRARIES += libaudiopolicyengineconfigurable
 
@@ -79,16 +83,27 @@
 
 LOCAL_SHARED_LIBRARIES += libaudiopolicyenginedefault
 
-endif
+endif # ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
 
 LOCAL_C_INCLUDES += \
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
     $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
+    $(TOPDIR)frameworks/av/services/audiopolicy/utilities
 
 LOCAL_STATIC_LIBRARIES := \
     libmedia_helper \
     libaudiopolicycomponents
 
+ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+LOCAL_STATIC_LIBRARIES += libxml2
+
+LOCAL_SHARED_LIBRARIES += libicuuc
+
+LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
+endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
 LOCAL_MODULE:= libaudiopolicymanagerdefault
 
 include $(BUILD_SHARED_LIBRARY)
@@ -108,7 +123,9 @@
 
 LOCAL_C_INCLUDES += \
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
-    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 LOCAL_MODULE:= libaudiopolicymanager
 
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index c1e7bc0..a215b95 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -158,14 +158,14 @@
                                       int indexMax) = 0;
 
     // sets the new stream volume at a level corresponding to the supplied index for the
-    // supplied device. By convention, specifying AUDIO_DEVICE_OUT_DEFAULT means
+    // supplied device. By convention, specifying AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME means
     // setting volume for all devices
     virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
                                           int index,
                                           audio_devices_t device) = 0;
 
     // retrieve current volume index for the specified stream and the
-    // specified device. By convention, specifying AUDIO_DEVICE_OUT_DEFAULT means
+    // specified device. By convention, specifying AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME means
     // querying the volume of the active device.
     virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
                                           int *index,
@@ -225,8 +225,12 @@
 
     virtual status_t startAudioSource(const struct audio_port_config *source,
                                       const audio_attributes_t *attributes,
-                                      audio_io_handle_t *handle) = 0;
+                                      audio_io_handle_t *handle,
+                                      uid_t uid) = 0;
     virtual status_t stopAudioSource(audio_io_handle_t handle) = 0;
+
+    virtual status_t setMasterMono(bool mono) = 0;
+    virtual status_t getMasterMono(bool *mono) = 0;
 };
 
 
@@ -308,7 +312,7 @@
     virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
 
     // move effect to the specified output
-    virtual status_t moveEffects(int session,
+    virtual status_t moveEffects(audio_session_t session,
                                      audio_io_handle_t srcOutput,
                                      audio_io_handle_t dstOutput) = 0;
 
@@ -328,9 +332,15 @@
 
     virtual void onAudioPatchListUpdate() = 0;
 
-    virtual audio_unique_id_t newAudioUniqueId() = 0;
+    virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) = 0;
 
     virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
+
+    virtual void onRecordingConfigurationUpdate(int event, audio_session_t session,
+                    audio_source_t source,
+                    const struct audio_config_base *clientConfig,
+                    const struct audio_config_base *deviceConfig,
+                    audio_patch_handle_t patchHandle) = 0;
 };
 
 extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface);
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 712f7a7..d091179 100755
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -30,6 +30,17 @@
     float mDBAttenuation;
 };
 
+/**
+ * device categories used for volume curve management.
+ */
+enum device_category {
+    DEVICE_CATEGORY_HEADSET,
+    DEVICE_CATEGORY_SPEAKER,
+    DEVICE_CATEGORY_EARPIECE,
+    DEVICE_CATEGORY_EXT_MEDIA,
+    DEVICE_CATEGORY_CNT
+};
+
 class Volume
 {
 public:
@@ -50,17 +61,6 @@
     };
 
     /**
-     * device categories used for volume curve management.
-     */
-    enum device_category {
-        DEVICE_CATEGORY_HEADSET,
-        DEVICE_CATEGORY_SPEAKER,
-        DEVICE_CATEGORY_EARPIECE,
-        DEVICE_CATEGORY_EXT_MEDIA,
-        DEVICE_CATEGORY_CNT
-    };
-
-    /**
      * extract one device relevant for volume control from multiple device selection
      *
      * @param[in] device for which the volume category is associated
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 4b73e3c..55ee91f 100755
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -18,13 +18,18 @@
 
 #include <system/audio.h>
 
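+// AUDIO_FORMAT_DEFAULT is used as a wildcard to mark audio profiles whose
+// actual format is only determined dynamically at run time.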
+static const audio_format_t gDynamicFormat = AUDIO_FORMAT_DEFAULT;
+
 // For mixed output and inputs, the policy will use max mixer sampling rates.
 // Do not limit sampling rate otherwise
-#define MAX_MIXER_SAMPLING_RATE 192000
+#define SAMPLE_RATE_HZ_MAX 192000
+
+// Used when a client opens a capture stream without specifying a desired sample rate.
+#define SAMPLE_RATE_HZ_DEFAULT 48000
 
 // For mixed output and inputs, the policy will use max mixer channel count.
 // Do not limit channel count otherwise
-#define MAX_MIXER_CHANNEL_COUNT 8
+#define MAX_MIXER_CHANNEL_COUNT FCC_8
 
 /**
  * A device mask for all audio input devices that are considered "virtual" when evaluating
@@ -37,9 +42,26 @@
  * A device mask for all audio input and output devices where matching inputs/outputs on device
  * type alone is not enough: the address must match too
  */
-#define APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL (AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
+#define APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL (AUDIO_DEVICE_OUT_REMOTE_SUBMIX|AUDIO_DEVICE_OUT_BUS)
 
-#define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX)
+#define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_BUS)
+
+/**
+ * Stub audio output device. Used in policy configuration file on platforms without audio outputs.
+ * This alias value to AUDIO_DEVICE_OUT_DEFAULT is only used in the audio policy context.
+ */
+#define AUDIO_DEVICE_OUT_STUB AUDIO_DEVICE_OUT_DEFAULT
+/**
+ * Stub audio input device. Used in policy configuration file on platforms without audio inputs.
+ * This alias value to AUDIO_DEVICE_IN_DEFAULT is only used in the audio policy context.
+ */
+#define AUDIO_DEVICE_IN_STUB AUDIO_DEVICE_IN_DEFAULT
+/**
+ * Alias to AUDIO_DEVICE_OUT_DEFAULT, defined for clarity when this value is used by volume
+ * control APIs (e.g. setStreamVolumeIndex()).
+ */
+#define AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME AUDIO_DEVICE_OUT_DEFAULT
+
 
 /**
  * Check if the state given correspond to an in call state.
@@ -86,3 +108,18 @@
            (((device & AUDIO_DEVICE_BIT_IN) == 0) &&
             ((device & APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL) != 0));
 }
+
+/* Indicates if audio formats are equivalent when considering a match between
+ * audio HAL supported formats and client requested formats
+ */
+static inline bool audio_formats_match(audio_format_t format1,
+                                       audio_format_t format2)
+{
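+    // All linear PCM formats wider than 16 bits are inter-convertible by the
+    // framework and are therefore treated as equivalent here.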
+    if (audio_is_linear_pcm(format1) &&
+            (audio_bytes_per_sample(format1) > 2) &&
+            audio_is_linear_pcm(format2) &&
+            (audio_bytes_per_sample(format2) > 2)) {
+        return true;
+    }
+    return format1 == format2;
+}
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index 8728ff3..3b4ae6b 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -5,32 +5,61 @@
 LOCAL_SRC_FILES:= \
     src/DeviceDescriptor.cpp \
     src/AudioGain.cpp \
-    src/StreamDescriptor.cpp \
     src/HwModule.cpp \
     src/IOProfile.cpp \
     src/AudioPort.cpp \
+    src/AudioProfile.cpp \
+    src/AudioRoute.cpp \
     src/AudioPolicyMix.cpp \
     src/AudioPatch.cpp \
     src/AudioInputDescriptor.cpp \
     src/AudioOutputDescriptor.cpp \
+    src/AudioCollections.cpp \
     src/EffectDescriptor.cpp \
-    src/ConfigParsingUtils.cpp \
     src/SoundTriggerSession.cpp \
     src/SessionRoute.cpp \
+    src/AudioSourceDescriptor.cpp \
+    src/VolumeCurve.cpp \
+    src/TypeConverter.cpp \
+    src/AudioSession.cpp
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
     libutils \
     liblog \
 
-LOCAL_C_INCLUDES += \
+LOCAL_C_INCLUDES := \
     $(LOCAL_PATH)/include \
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
-    $(TOPDIR)frameworks/av/services/audiopolicy
+    $(TOPDIR)frameworks/av/services/audiopolicy \
+    $(TOPDIR)frameworks/av/services/audiopolicy/utilities \
+
+ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+
+LOCAL_SRC_FILES += src/Serializer.cpp
+
+LOCAL_STATIC_LIBRARIES += libxml2
+
+LOCAL_SHARED_LIBRARIES += libicuuc
+
+LOCAL_C_INCLUDES += \
+    $(TOPDIR)external/libxml2/include \
+    $(TOPDIR)external/icu/icu4c/source/common
+
+else
+
+LOCAL_SRC_FILES += \
+    src/ConfigParsingUtils.cpp \
+    src/StreamDescriptor.cpp \
+    src/Gains.cpp
+
+endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
 
 LOCAL_EXPORT_C_INCLUDE_DIRS := \
     $(LOCAL_PATH)/include
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
 LOCAL_MODULE := libaudiopolicycomponents
 
 include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h b/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
new file mode 100644
index 0000000..8f00d22
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
+namespace android {
+
+class AudioPort;
+class AudioRoute;
+
+class AudioPortVector : public Vector<sp<AudioPort> >
+{
+public:
+    sp<AudioPort> findByTagName(const String8 &tagName) const;
+};
+
+
+class AudioRouteVector : public Vector<sp<AudioRoute> >
+{
+public:
+    status_t dump(int fd, int spaces) const;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
index 21fbf9b..cea5c0b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
@@ -28,10 +28,39 @@
     AudioGain(int index, bool useInChannelMask);
     virtual ~AudioGain() {}
 
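+    // Typed accessors for the wrapped struct audio_gain; gain values are in
+    // millibels (mB) and ramp durations in milliseconds (ms).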
+    void setMode(audio_gain_mode_t mode) { mGain.mode = mode; }
+    const audio_gain_mode_t &getMode() const { return mGain.mode; }
+
+    void setChannelMask(audio_channel_mask_t mask) { mGain.channel_mask = mask; }
+    const audio_channel_mask_t &getChannelMask() const { return mGain.channel_mask; }
+
+    void setMinValueInMb(int minValue) { mGain.min_value = minValue; }
+    int getMinValueInMb() const { return mGain.min_value; }
+
+    void setMaxValueInMb(int maxValue) { mGain.max_value = maxValue; }
+    int getMaxValueInMb() const { return mGain.max_value; }
+
+    void setDefaultValueInMb(int defaultValue) { mGain.default_value = defaultValue; }
+    int getDefaultValueInMb() const { return mGain.default_value; }
+
+    void setStepValueInMb(uint32_t stepValue) { mGain.step_value = stepValue; }
+    int getStepValueInMb() const { return mGain.step_value; }
+
+    void setMinRampInMs(uint32_t minRamp) { mGain.min_ramp_ms = minRamp; }
+    int getMinRampInMs() const { return mGain.min_ramp_ms; }
+
+    void setMaxRampInMs(uint32_t maxRamp) { mGain.max_ramp_ms = maxRamp; }
+    int getMaxRampInMs() const { return mGain.max_ramp_ms; }
+
+    // TODO: remove dump from here (split serialization)
     void dump(int fd, int spaces, int index) const;
 
     void getDefaultConfig(struct audio_gain_config *config);
     status_t checkConfig(const struct audio_gain_config *config);
+
+    const struct audio_gain &getGain() const { return mGain; }
+
+private:
     int               mIndex;
     struct audio_gain mGain;
     bool              mUseInChannelMask;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 48d09ed..46309ed 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -17,6 +17,8 @@
 #pragma once
 
 #include "AudioPort.h"
+#include "AudioSession.h"
+#include "AudioSessionInfoProvider.h"
 #include <utils/Errors.h>
 #include <system/audio.h>
 #include <utils/SortedVector.h>
@@ -29,28 +31,21 @@
 
 // descriptor for audio inputs. Used to maintain current configuration of each opened audio input
 // and keep track of the usage of this input.
-class AudioInputDescriptor: public AudioPortConfig
+class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
 {
 public:
     AudioInputDescriptor(const sp<IOProfile>& profile);
     void setIoHandle(audio_io_handle_t ioHandle);
     audio_port_handle_t getId() const;
     audio_module_handle_t getModuleHandle() const;
+    uint32_t getOpenRefCount() const;
 
     status_t    dump(int fd);
 
     audio_io_handle_t             mIoHandle;       // input handle
     audio_devices_t               mDevice;         // current device this input is routed to
     AudioMix                      *mPolicyMix;     // non NULL when used by a dynamic policy
-    audio_patch_handle_t          mPatchHandle;
-    uint32_t                      mRefCount;       // number of AudioRecord clients using
-    // this input
-    uint32_t                      mOpenRefCount;
-    audio_source_t                mInputSource;    // input source selected by application
-    //(mediarecorder.h)
     const sp<IOProfile>           mProfile;        // I/O profile this output derives from
-    SortedVector<audio_session_t> mSessions;       // audio sessions attached to this input
-    bool                          mIsSoundTrigger; // used by a soundtrigger capture
 
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
             const struct audio_port_config *srcConfig = NULL) const;
@@ -60,17 +55,34 @@
     SortedVector<audio_session_t> getPreemptedSessions() const;
     bool hasPreemptedSession(audio_session_t session) const;
     void clearPreemptedSessions();
+    bool isActive() const;
+    bool isSourceActive(audio_source_t source) const;
+    audio_source_t inputSource() const;
+    bool isSoundTrigger() const;
+    status_t addAudioSession(audio_session_t session,
+                             const sp<AudioSession>& audioSession);
+    status_t removeAudioSession(audio_session_t session);
+    sp<AudioSession> getAudioSession(audio_session_t session) const;
+    AudioSessionCollection getActiveAudioSessions() const;
+
+    // implementation of AudioSessionInfoProvider
+    virtual audio_config_base_t getConfig() const;
+    virtual audio_patch_handle_t getPatchHandle() const;
+
+    void setPatchHandle(audio_patch_handle_t handle);
 
 private:
+    audio_patch_handle_t          mPatchHandle;
     audio_port_handle_t           mId;
-    // Because a preemtible capture session can preempt another one, we end up in an endless loop
+    // audio sessions attached to this input
+    AudioSessionCollection        mSessions;
+    // Because a preemptible capture session can preempt another one, we end up in an endless loop
-    // situation were each session is allowed to restart after being preempted,
+    // situation where each session is allowed to restart after being preempted,
     // thus preempting the other one which restarts and so on.
     // To avoid this situation, we store which audio session was preempted when
     // a particular input started and prevent preemption of this active input by this session.
     // We also inherit sessions from the preempted input to avoid a 3 way preemption loop etc...
     SortedVector<audio_session_t> mPreemptedSessions;
-
 };
 
 class AudioInputCollection :
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 50f622d..dd3f8ae 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -22,12 +22,14 @@
 #include <utils/Timers.h>
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
+#include "AudioSourceDescriptor.h"
 
 namespace android {
 
 class IOProfile;
 class AudioMix;
 class AudioPolicyClientInterface;
+class DeviceDescriptor;
 
 // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
 // and keep track of the usage of this output by each audio stream type.
@@ -69,9 +71,11 @@
 
     audio_module_handle_t getModuleHandle() const;
 
+    audio_patch_handle_t getPatchHandle() const { return mPatchHandle; }
+    void setPatchHandle(audio_patch_handle_t handle) { mPatchHandle = handle; }
+
     sp<AudioPort>       mPort;
     audio_devices_t mDevice;                   // current device this output is routed to
-    audio_patch_handle_t mPatchHandle;
     uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output
     nsecs_t mStopTime[AUDIO_STREAM_CNT];
     float mCurVolume[AUDIO_STREAM_CNT];   // current stream volume in dB
@@ -81,6 +85,7 @@
     AudioPolicyClientInterface *mClientInterface;
 
 protected:
+    audio_patch_handle_t mPatchHandle;
     audio_port_handle_t mId;
 };
 
@@ -126,6 +131,31 @@
     uint32_t mGlobalRefCount;  // non-stream-specific ref count
 };
 
+// Audio output driven by an input device directly.
+class HwAudioOutputDescriptor: public AudioOutputDescriptor
+{
+public:
+    HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
+                            AudioPolicyClientInterface *clientInterface);
+    virtual ~HwAudioOutputDescriptor() {}
+
+    status_t    dump(int fd);
+
+    virtual audio_devices_t supportedDevices();
+    virtual bool setVolume(float volume,
+                           audio_stream_type_t stream,
+                           audio_devices_t device,
+                           uint32_t delayMs,
+                           bool force);
+
+    virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+                           const struct audio_port_config *srcConfig = NULL) const;
+    virtual void toAudioPort(struct audio_port *port) const;
+
+    const sp<AudioSourceDescriptor> mSource;
+
+};
+
 class SwAudioOutputCollection :
         public DefaultKeyedVector< audio_io_handle_t, sp<SwAudioOutputDescriptor> >
 {
@@ -160,4 +190,19 @@
     status_t dump(int fd) const;
 };
 
+class HwAudioOutputCollection :
+        public DefaultKeyedVector< audio_io_handle_t, sp<HwAudioOutputDescriptor> >
+{
+public:
+    bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+
+    /**
+     * return true if any output is playing anything besides the stream to ignore
+     */
+    bool isAnyOutputActive(audio_stream_type_t streamToIgnore) const;
+
+    status_t dump(int fd) const;
+};
+
+
 }; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
new file mode 100644
index 0000000..f2756b5
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <AudioGain.h>
+#include <VolumeCurve.h>
+#include <AudioPort.h>
+#include <AudioPatch.h>
+#include <DeviceDescriptor.h>
+#include <IOProfile.h>
+#include <HwModule.h>
+#include <AudioInputDescriptor.h>
+#include <AudioOutputDescriptor.h>
+#include <AudioPolicyMix.h>
+#include <EffectDescriptor.h>
+#include <SoundTriggerSession.h>
+#include <SessionRoute.h>
+
+namespace android {
+
+class AudioPolicyConfig
+{
+public:
+    AudioPolicyConfig(HwModuleCollection &hwModules,
+                      DeviceVector &availableOutputDevices,
+                      DeviceVector &availableInputDevices,
+                      sp<DeviceDescriptor> &defaultOutputDevices,
+                      bool &isSpeakerDrcEnabled,
+                      VolumeCurvesCollection *volumes = nullptr)
+        : mHwModules(hwModules),
+          mAvailableOutputDevices(availableOutputDevices),
+          mAvailableInputDevices(availableInputDevices),
+          mDefaultOutputDevices(defaultOutputDevices),
+          mVolumeCurves(volumes),
+          mIsSpeakerDrcEnabled(isSpeakerDrcEnabled)
+    {}
+
+    void setVolumes(const VolumeCurvesCollection &volumes)
+    {
+        if (mVolumeCurves != nullptr) {
+            *mVolumeCurves = volumes;
+        }
+    }
+
+    void setHwModules(const HwModuleCollection &hwModules)
+    {
+        mHwModules = hwModules;
+    }
+
+    void addAvailableDevice(const sp<DeviceDescriptor> &availableDevice)
+    {
+        if (audio_is_output_device(availableDevice->type())) {
+            mAvailableOutputDevices.add(availableDevice);
+        } else if (audio_is_input_device(availableDevice->type())) {
+            mAvailableInputDevices.add(availableDevice);
+        }
+    }
+
+    void addAvailableInputDevices(const DeviceVector &availableInputDevices)
+    {
+        mAvailableInputDevices.add(availableInputDevices);
+    }
+
+    void addAvailableOutputDevices(const DeviceVector &availableOutputDevices)
+    {
+        mAvailableOutputDevices.add(availableOutputDevices);
+    }
+
+    void setSpeakerDrcEnabled(bool isSpeakerDrcEnabled)
+    {
+        mIsSpeakerDrcEnabled = isSpeakerDrcEnabled;
+    }
+
+    const HwModuleCollection getHwModules() const { return mHwModules; }
+
+    const DeviceVector &getAvailableInputDevices() const
+    {
+        return mAvailableInputDevices;
+    }
+
+    const DeviceVector &getAvailableOutputDevices() const
+    {
+        return mAvailableOutputDevices;
+    }
+
+    void setDefaultOutputDevice(const sp<DeviceDescriptor> &defaultDevice)
+    {
+        mDefaultOutputDevices = defaultDevice;
+    }
+
+    const sp<DeviceDescriptor> &getDefaultOutputDevice() const { return mDefaultOutputDevices; }
+
+    void setDefault(void)
+    {
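+        // Build a minimal fallback configuration: a "primary" module with a
+        // 16-bit stereo 44.1 kHz speaker output and a 16-bit mono 8 kHz
+        // built-in mic input.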
+        mDefaultOutputDevices = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
+        sp<HwModule> module;
+        sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
+        mAvailableOutputDevices.add(mDefaultOutputDevices);
+        mAvailableInputDevices.add(defaultInputDevice);
+
+        module = new HwModule("primary");
+
+        sp<OutputProfile> outProfile;
+        outProfile = new OutputProfile(String8("primary"));
+        outProfile->attach(module);
+        outProfile->addAudioProfile(
+                new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 44100));
+        outProfile->addSupportedDevice(mDefaultOutputDevices);
+        outProfile->setFlags(AUDIO_OUTPUT_FLAG_PRIMARY);
+        module->mOutputProfiles.add(outProfile);
+
+        sp<InputProfile> inProfile;
+        inProfile = new InputProfile(String8("primary"));
+        inProfile->attach(module);
+        inProfile->addAudioProfile(
+                new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000));
+        inProfile->addSupportedDevice(defaultInputDevice);
+        module->mInputProfiles.add(inProfile);
+
+        mHwModules.add(module);
+    }
+
+private:
+    HwModuleCollection &mHwModules; /**< Collection of Module, with Profiles, i.e. Mix Ports. */
+    DeviceVector &mAvailableOutputDevices;
+    DeviceVector &mAvailableInputDevices;
+    sp<DeviceDescriptor> &mDefaultOutputDevices;
+    VolumeCurvesCollection *mVolumeCurves;
+    bool &mIsSpeakerDrcEnabled;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index d51f4e1..8f5ebef 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -54,7 +54,7 @@
 public:
     status_t getAudioPolicyMix(String8 address, sp<AudioPolicyMix> &policyMix) const;
 
-    status_t registerMix(String8 address, AudioMix mix);
+    status_t registerMix(String8 address, AudioMix mix, sp<SwAudioOutputDescriptor> desc);
 
     status_t unregisterMix(String8 address);
 
@@ -69,7 +69,8 @@
      * @return NO_ERROR if an output was found for the given attribute (in this case, the
      *                  descriptor output param is initialized), error code otherwise.
      */
-    status_t getOutputForAttr(audio_attributes_t attributes, sp<SwAudioOutputDescriptor> &desc);
+    status_t getOutputForAttr(audio_attributes_t attributes, uid_t uid,
+            sp<SwAudioOutputDescriptor> &desc);
 
     audio_devices_t getDeviceAndMixForInputSource(audio_source_t inputSource,
                                                   audio_devices_t availableDeviceTypes,
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index 4fdf5b4..211ec98 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -16,6 +16,8 @@
 
 #pragma once
 
+#include "AudioCollections.h"
+#include "AudioProfile.h"
 #include <utils/String8.h>
 #include <utils/Vector.h>
 #include <utils/RefBase.h>
@@ -27,82 +29,129 @@
 
 class HwModule;
 class AudioGain;
+class AudioRoute;
+typedef Vector<sp<AudioGain> > AudioGainCollection;
 
 class AudioPort : public virtual RefBase
 {
 public:
-    AudioPort(const String8& name, audio_port_type_t type,
-              audio_port_role_t role);
+    AudioPort(const String8& name, audio_port_type_t type,  audio_port_role_t role) :
+        mName(name), mType(type), mRole(role), mFlags(AUDIO_OUTPUT_FLAG_NONE) {}
+
     virtual ~AudioPort() {}
 
+    void setName(const String8 &name) { mName = name; }
+    const String8 &getName() const { return mName; }
+
+    audio_port_type_t getType() const { return mType; }
+    audio_port_role_t getRole() const { return mRole; }
+
+    virtual const String8 getTagName() const = 0;
+
+    void setGains(const AudioGainCollection &gains) { mGains = gains; }
+    const AudioGainCollection &getGains() const { return mGains; }
+
+    void setFlags(uint32_t flags)
+    {
+        // Force the direct flag if the offload flag is set: offloading implies a direct
+        // output stream, and all common behaviors are driven by checking only the direct
+        // flag. This should normally be set appropriately in the policy configuration file.
+        if (mRole == AUDIO_PORT_ROLE_SOURCE && (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+            flags |= AUDIO_OUTPUT_FLAG_DIRECT;
+        }
+        mFlags = flags;
+    }
+    uint32_t getFlags() const { return mFlags; }
+
     virtual void attach(const sp<HwModule>& module);
     bool isAttached() { return mModule != 0; }
 
+    // Audio port IDs are in a different namespace than AudioFlinger unique IDs
     static audio_port_handle_t getNextUniqueId();
 
     virtual void toAudioPort(struct audio_port *port) const;
 
     virtual void importAudioPort(const sp<AudioPort> port);
-    void clearCapabilities();
 
-    void loadSamplingRates(char *name);
-    void loadFormats(char *name);
-    void loadOutChannels(char *name);
-    void loadInChannels(char *name);
+    void addAudioProfile(const sp<AudioProfile> &profile) { mProfiles.add(profile); }
 
-    audio_gain_mode_t loadGainMode(char *name);
-    void loadGain(cnode *root, int index);
-    virtual void loadGains(cnode *root);
+    void setAudioProfiles(const AudioProfileVector &profiles) { mProfiles = profiles; }
+    AudioProfileVector &getAudioProfiles() { return mProfiles; }
+
+    bool hasValidAudioProfile() const { return mProfiles.hasValidProfile(); }
+
+    bool hasDynamicAudioProfile() const { return mProfiles.hasDynamicProfile(); }
 
     // searches for an exact match
-    status_t checkExactSamplingRate(uint32_t samplingRate) const;
-    // searches for a compatible match, and returns the best match via updatedSamplingRate
-    status_t checkCompatibleSamplingRate(uint32_t samplingRate,
-            uint32_t *updatedSamplingRate) const;
-    // searches for an exact match
-    status_t checkExactChannelMask(audio_channel_mask_t channelMask) const;
-    // searches for a compatible match, currently implemented for input channel masks only
-    status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
-            audio_channel_mask_t *updatedChannelMask) const;
+    status_t checkExactAudioProfile(uint32_t samplingRate,
+                                    audio_channel_mask_t channelMask,
+                                    audio_format_t format) const
+    {
+        return mProfiles.checkExactProfile(samplingRate, channelMask, format);
+    }
 
-    status_t checkExactFormat(audio_format_t format) const;
-    // searches for a compatible match, currently implemented for input formats only
-    status_t checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat) const;
+    // searches for a compatible match, currently implemented for input only;
+    // the in/out parameters are updated with the best match found.
+    status_t checkCompatibleAudioProfile(uint32_t &samplingRate,
+                                         audio_channel_mask_t &channelMask,
+                                         audio_format_t &format) const
+    {
+        return mProfiles.checkCompatibleProfile(samplingRate, channelMask, format, mType, mRole);
+    }
+
+    void clearAudioProfiles() { mProfiles.clearProfiles(); }
+
     status_t checkGain(const struct audio_gain_config *gainConfig, int index) const;
 
-    uint32_t pickSamplingRate() const;
-    audio_channel_mask_t pickChannelMask() const;
-    audio_format_t pickFormat() const;
+    void pickAudioProfile(uint32_t &samplingRate,
+                          audio_channel_mask_t &channelMask,
+                          audio_format_t &format) const;
 
     static const audio_format_t sPcmFormatCompareTable[];
-    static int compareFormats(const audio_format_t *format1, const audio_format_t *format2) {
-        return compareFormats(*format1, *format2);
-    }
+
     static int compareFormats(audio_format_t format1, audio_format_t format2);
 
+    // Used to select an audio HAL output stream with a sample format providing the
+    // least degradation for a given AudioTrack sample format.
+    static bool isBetterFormatMatch(audio_format_t newFormat,
+                                        audio_format_t currentFormat,
+                                        audio_format_t targetFormat);
+
     audio_module_handle_t getModuleHandle() const;
     uint32_t getModuleVersion() const;
     const char *getModuleName() const;
 
-    void dump(int fd, int spaces) const;
+    bool useInputChannelMask() const
+    {
+        return ((mType == AUDIO_PORT_TYPE_DEVICE) && (mRole == AUDIO_PORT_ROLE_SOURCE)) ||
+                ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SINK));
+    }
+
+    inline bool isDirectOutput() const
+    {
+        return (mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+                (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD));
+    }
+
+    void addRoute(const sp<AudioRoute> &route) { mRoutes.add(route); }
+    const AudioRouteVector &getRoutes() const { return mRoutes; }
+
+    void dump(int fd, int spaces, bool verbose = true) const;
     void log(const char* indent) const;
 
-    String8           mName;
-    audio_port_type_t mType;
-    audio_port_role_t mRole;
-    bool              mUseInChannelMask;
-    // by convention, "0' in the first entry in mSamplingRates, mChannelMasks or mFormats
-    // indicates the supported parameters should be read from the output stream
-    // after it is opened for the first time
-    Vector <uint32_t> mSamplingRates; // supported sampling rates
-    Vector <audio_channel_mask_t> mChannelMasks; // supported channel masks
-    Vector <audio_format_t> mFormats; // supported audio formats
-    Vector < sp<AudioGain> > mGains; // gain controllers
+    AudioGainCollection mGains; // gain controllers
     sp<HwModule> mModule;                 // audio HW module exposing this I/O stream
-    uint32_t mFlags; // attribute flags (e.g primary output,
-                     // direct output...).
 
 private:
+    void pickChannelMask(audio_channel_mask_t &channelMask, const ChannelsVector &channelMasks) const;
+    void pickSamplingRate(uint32_t &rate, const SampleRateVector &samplingRates) const;
+
+    String8  mName;
+    audio_port_type_t mType;
+    audio_port_role_t mRole;
+    uint32_t mFlags; // attribute flags mask (e.g. primary output, direct output...).
+    AudioProfileVector mProfiles; // AudioProfiles supported by this port (format, Rates, Channels)
+    AudioRouteVector mRoutes; // Routes involving this port
     static volatile int32_t mNextUniqueId;
 };
 
@@ -113,9 +162,9 @@
     virtual ~AudioPortConfig() {}
 
     status_t applyAudioPortConfig(const struct audio_port_config *config,
-            struct audio_port_config *backupConfig = NULL);
+                                  struct audio_port_config *backupConfig = NULL);
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
-            const struct audio_port_config *srcConfig = NULL) const = 0;
+                                   const struct audio_port_config *srcConfig = NULL) const = 0;
     virtual sp<AudioPort> getAudioPort() const = 0;
     uint32_t mSamplingRate;
     audio_format_t mFormat;
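The notable behavioral bit in the reworked AudioPort above is setFlags():
compressed offload implies a direct stream, so the DIRECT bit is forced for
source (output) ports. A standalone sketch of that rule (the flag values are
the audio.h constants of this era; everything else is a stand-in):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t OUTPUT_FLAG_DIRECT           = 0x1;  // AUDIO_OUTPUT_FLAG_DIRECT
    constexpr uint32_t OUTPUT_FLAG_COMPRESS_OFFLOAD = 0x10; // AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD

    // Mirrors AudioPort::setFlags() for a source (output) port.
    uint32_t normalizeOutputFlags(uint32_t flags, bool isSourcePort) {
        if (isSourcePort && (flags & OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
            flags |= OUTPUT_FLAG_DIRECT;  // offload implies direct
        }
        return flags;
    }

    int main() {
        assert(normalizeOutputFlags(OUTPUT_FLAG_COMPRESS_OFFLOAD, true) & OUTPUT_FLAG_DIRECT);
    }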
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
new file mode 100644
index 0000000..404e27d
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "policy.h"
+#include <utils/String8.h>
+#include <utils/SortedVector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
+namespace android {
+
+typedef SortedVector<uint32_t> SampleRateVector;
+typedef SortedVector<audio_channel_mask_t> ChannelsVector;
+typedef Vector<audio_format_t> FormatVector;
+
+template <typename T>
+bool operator == (const SortedVector<T> &left, const SortedVector<T> &right);
+
+class AudioProfile : public virtual RefBase
+{
+public:
+    AudioProfile(audio_format_t format,
+                 audio_channel_mask_t channelMasks,
+                 uint32_t samplingRate) :
+        mName(String8("")),
+        mFormat(format)
+    {
+        mChannelMasks.add(channelMasks);
+        mSamplingRates.add(samplingRate);
+    }
+
+    AudioProfile(audio_format_t format,
+                 const ChannelsVector &channelMasks,
+                 const SampleRateVector &samplingRateCollection) :
+        mName(String8("")),
+        mFormat(format),
+        mChannelMasks(channelMasks),
+        mSamplingRates(samplingRateCollection)
+    {}
+
+    audio_format_t getFormat() const { return mFormat; }
+
+    void setChannels(const ChannelsVector &channelMasks)
+    {
+        if (mIsDynamicChannels) {
+            mChannelMasks = channelMasks;
+        }
+    }
+    const ChannelsVector &getChannels() const { return mChannelMasks; }
+
+    void setSampleRates(const SampleRateVector &sampleRates)
+    {
+        if (mIsDynamicRate) {
+            mSamplingRates = sampleRates;
+        }
+    }
+    const SampleRateVector &getSampleRates() const { return mSamplingRates; }
+
+    bool isValid() const { return hasValidFormat() && hasValidRates() && hasValidChannels(); }
+
+    void clear()
+    {
+        if (mIsDynamicChannels) {
+            mChannelMasks.clear();
+        }
+        if (mIsDynamicRate) {
+            mSamplingRates.clear();
+        }
+    }
+
+    inline bool supportsChannels(audio_channel_mask_t channels) const
+    {
+        return mChannelMasks.indexOf(channels) >= 0;
+    }
+    inline bool supportsRate(uint32_t rate) const
+    {
+        return mSamplingRates.indexOf(rate) >= 0;
+    }
+
+    status_t checkExact(uint32_t rate, audio_channel_mask_t channels, audio_format_t format) const;
+
+    status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
+                                        audio_channel_mask_t &updatedChannelMask,
+                                        audio_port_type_t portType,
+                                        audio_port_role_t portRole) const;
+
+    status_t checkCompatibleSamplingRate(uint32_t samplingRate,
+                                         uint32_t &updatedSamplingRate) const;
+
+    bool hasValidFormat() const { return mFormat != AUDIO_FORMAT_DEFAULT; }
+    bool hasValidRates() const { return !mSamplingRates.isEmpty(); }
+    bool hasValidChannels() const { return !mChannelMasks.isEmpty(); }
+
+    void setDynamicChannels(bool dynamic) { mIsDynamicChannels = dynamic; }
+    bool isDynamicChannels() const { return mIsDynamicChannels; }
+
+    void setDynamicRate(bool dynamic) { mIsDynamicRate = dynamic; }
+    bool isDynamicRate() const { return mIsDynamicRate; }
+
+    void setDynamicFormat(bool dynamic) { mIsDynamicFormat = dynamic; }
+    bool isDynamicFormat() const { return mIsDynamicFormat; }
+
+    bool isDynamic() { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
+
+    void dump(int fd, int spaces) const;
+
+private:
+    String8  mName;
+    audio_format_t mFormat;
+    ChannelsVector mChannelMasks;
+    SampleRateVector mSamplingRates;
+
+    bool mIsDynamicFormat = false;
+    bool mIsDynamicChannels = false;
+    bool mIsDynamicRate = false;
+};
+
+
+class AudioProfileVector : public Vector<sp<AudioProfile> >
+{
+public:
+    ssize_t add(const sp<AudioProfile> &profile)
+    {
+        ssize_t index = Vector::add(profile);
+        // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
+        // TODO: compareFormats could be a lambda to convert between pointer-to-format to format:
+        // [](const audio_format_t *format1, const audio_format_t *format2) {
+        //     return compareFormats(*format1, *format2);
+        // }
+        sort(compareFormats);
+        return index;
+    }
+
+    // This API is intended to be used by the policy manager once retrieving capabilities
+    // for a profile with dynamic format, rate and channels attributes
+    ssize_t addProfileFromHal(const sp<AudioProfile> &profileToAdd)
+    {
+        // Check valid profile to add:
+        if (!profileToAdd->hasValidFormat()) {
+            return -1;
+        }
+        if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+            FormatVector formats;
+            formats.add(profileToAdd->getFormat());
+            setFormats(formats);
+            return 0;
+        }
+        if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
+            setSampleRatesFor(profileToAdd->getSampleRates(), profileToAdd->getFormat());
+            return 0;
+        }
+        if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+            setChannelsFor(profileToAdd->getChannels(), profileToAdd->getFormat());
+            return 0;
+        }
+        // Go through the list of profiles to avoid duplicates
+        for (size_t profileIndex = 0; profileIndex < size(); profileIndex++) {
+            const sp<AudioProfile> &profile = itemAt(profileIndex);
+            if (profile->isValid() && profile == profileToAdd) {
+                // Nothing to do
+                return profileIndex;
+            }
+        }
+        profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
+        return add(profileToAdd);
+    }
+
+    sp<AudioProfile> getFirstValidProfile() const
+    {
+        for (size_t i = 0; i < size(); i++) {
+            if (itemAt(i)->isValid()) {
+                return itemAt(i);
+            }
+        }
+        return 0;
+    }
+
+    bool hasValidProfile() const { return getFirstValidProfile() != 0; }
+
+    status_t checkExactProfile(uint32_t samplingRate, audio_channel_mask_t channelMask,
+                               audio_format_t format) const;
+
+    status_t checkCompatibleProfile(uint32_t &samplingRate, audio_channel_mask_t &channelMask,
+                                    audio_format_t &format,
+                                    audio_port_type_t portType,
+                                    audio_port_role_t portRole) const;
+
+    FormatVector getSupportedFormats() const
+    {
+        FormatVector supportedFormats;
+        for (size_t i = 0; i < size(); i++) {
+            if (itemAt(i)->hasValidFormat()) {
+                supportedFormats.add(itemAt(i)->getFormat());
+            }
+        }
+        return supportedFormats;
+    }
+
+    bool hasDynamicProfile() const
+    {
+        for (size_t i = 0; i < size(); i++) {
+            if (itemAt(i)->isDynamic()) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    bool hasDynamicFormat() const
+    {
+        return getProfileFor(gDynamicFormat) != 0;
+    }
+
+    bool hasDynamicChannelsFor(audio_format_t format) const
+    {
+       for (size_t i = 0; i < size(); i++) {
+           sp<AudioProfile> profile = itemAt(i);
+           if (profile->getFormat() == format && profile->isDynamicChannels()) {
+               return true;
+           }
+       }
+       return false;
+    }
+
+    bool hasDynamicRateFor(audio_format_t format) const
+    {
+        for (size_t i = 0; i < size(); i++) {
+            sp<AudioProfile> profile = itemAt(i);
+            if (profile->getFormat() == format && profile->isDynamicRate()) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    // One audio profile will be added for each format supported by the audio HAL
+    void setFormats(const FormatVector &formats)
+    {
+        // Only allow changing the format of a dynamic profile
+        sp<AudioProfile> dynamicFormatProfile = getProfileFor(gDynamicFormat);
+        if (dynamicFormatProfile == 0) {
+            return;
+        }
+        for (size_t i = 0; i < formats.size(); i++) {
+            sp<AudioProfile> profile = new AudioProfile(formats[i],
+                                                        dynamicFormatProfile->getChannels(),
+                                                        dynamicFormatProfile->getSampleRates());
+            profile->setDynamicFormat(true);
+            profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
+            profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
+            add(profile);
+        }
+    }
+
+    void clearProfiles()
+    {
+        for (size_t i = size(); i != 0; ) {
+            sp<AudioProfile> profile = itemAt(--i);
+            if (profile->isDynamicFormat() && profile->hasValidFormat()) {
+                removeAt(i);
+                continue;
+            }
+            profile->clear();
+        }
+    }
+
+    void dump(int fd, int spaces) const
+    {
+        const size_t SIZE = 256;
+        char buffer[SIZE];
+
+        snprintf(buffer, SIZE, "%*s- Profiles:\n", spaces, "");
+        write(fd, buffer, strlen(buffer));
+        for (size_t i = 0; i < size(); i++) {
+            snprintf(buffer, SIZE, "%*sProfile %zu:", spaces + 4, "", i);
+            write(fd, buffer, strlen(buffer));
+            itemAt(i)->dump(fd, spaces + 8);
+        }
+    }
+
+private:
+    void setSampleRatesFor(const SampleRateVector &sampleRates, audio_format_t format)
+    {
+        for (size_t i = 0; i < size(); i++) {
+            sp<AudioProfile> profile = itemAt(i);
+            if (profile->getFormat() == format && profile->isDynamicRate()) {
+                if (profile->hasValidRates()) {
+                    // Need to create a new profile with the same format
+                    sp<AudioProfile> profileToAdd = new AudioProfile(format, profile->getChannels(),
+                                                                     sampleRates);
+                    profileToAdd->setDynamicFormat(true); // must be set to allow later cleanup
+                    add(profileToAdd);
+                } else {
+                    profile->setSampleRates(sampleRates);
+                }
+                return;
+            }
+        }
+    }
+
+    void setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format)
+    {
+        for (size_t i = 0; i < size(); i++) {
+            sp<AudioProfile> profile = itemAt(i);
+            if (profile->getFormat() == format && profile->isDynamicChannels()) {
+                if (profile->hasValidChannels()) {
+                    // Need to create a new profile with the same format
+                    sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMasks,
+                                                                     profile->getSampleRates());
+                    profileToAdd->setDynamicFormat(true); // must be set to allow later cleanup
+                    add(profileToAdd);
+                } else {
+                    profile->setChannels(channelMasks);
+                }
+                return;
+            }
+        }
+    }
+
+    sp<AudioProfile> getProfileFor(audio_format_t format) const
+    {
+        for (size_t i = 0; i < size(); i++) {
+            if (itemAt(i)->getFormat() == format) {
+                return itemAt(i);
+            }
+        }
+        return 0;
+    }
+
+    static int compareFormats(const sp<AudioProfile> *profile1, const sp<AudioProfile> *profile2);
+};
+
+bool operator == (const AudioProfile &left, const AudioProfile &right);
+
+}; // namespace android
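The subtle part of AudioProfileVector above is addProfileFromHal(): a profile
reported by the HAL may carry any subset of {format, channel masks, sample
rates}, and each case folds into the existing dynamic profiles differently. A
standalone sketch of just that branching (stand-in types):

    // Classification mirroring addProfileFromHal() above.
    struct HalProfile { bool hasFormat, hasChannels, hasRates; };

    enum class Action {
        Reject,               // format is mandatory
        SetFormats,           // format-only report: fan out over the dynamic-format profile
        SetRatesForFormat,    // fill the dynamic rates of the matching format
        SetChannelsForFormat, // fill the dynamic channels of the matching format
        AddWholeProfile       // fully specified: add unless already present
    };

    Action classify(const HalProfile &p) {
        if (!p.hasFormat)                  return Action::Reject;
        if (!p.hasChannels && !p.hasRates) return Action::SetFormats;
        if (!p.hasChannels &&  p.hasRates) return Action::SetRatesForFormat;
        if ( p.hasChannels && !p.hasRates) return Action::SetChannelsForFormat;
        return Action::AddWholeProfile;
    }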
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
new file mode 100644
index 0000000..67e197f
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "AudioCollections.h"
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+
+namespace android
+{
+
+class AudioPort;
+class DeviceDescriptor;
+
+typedef enum {
+    AUDIO_ROUTE_MUX = 0,
+    AUDIO_ROUTE_MIX = 1
+} audio_route_type_t;
+
+class AudioRoute  : public virtual RefBase
+{
+public:
+    AudioRoute(audio_route_type_t type) : mType(type) {}
+
+    void setSources(const AudioPortVector &sources) { mSources = sources; }
+    const AudioPortVector &getSources() const { return mSources; }
+
+    void setSink(const sp<AudioPort> &sink) { mSink = sink; }
+    const sp<AudioPort> &getSink() const { return mSink; }
+
+    audio_route_type_t getType() const { return mType; }
+
+    void dump(int fd, int spaces) const;
+
+private:
+    AudioPortVector mSources;
+    sp<AudioPort> mSink;
+    audio_route_type_t mType;
+
+};
+
+}; // namespace android
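A route ties N source ports to one sink port; the type records whether the
sources are mixed together (AUDIO_ROUTE_MIX) or selected one at a time
(AUDIO_ROUTE_MUX). A standalone sketch of the shape (stand-in types; the
"primary output"/"deep_buffer" tag names are illustrative only):

    #include <memory>
    #include <string>
    #include <vector>

    enum class RouteType { Mux, Mix };
    struct Port  { std::string tagName; };
    struct Route {
        RouteType type;
        std::vector<std::shared_ptr<Port>> sources;
        std::shared_ptr<Port> sink;
    };

    Route makeSpeakerRoute() {
        auto speaker = std::make_shared<Port>(Port{"Speaker"});
        auto primary = std::make_shared<Port>(Port{"primary output"});
        auto deep    = std::make_shared<Port>(Port{"deep_buffer"});
        // Both mix ports can feed the speaker and are mixed, not multiplexed.
        return Route{RouteType::Mix, {primary, deep}, speaker};
    }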
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
new file mode 100644
index 0000000..388c25d
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/KeyedVector.h>
+#include <media/AudioPolicy.h>
+#include "AudioSessionInfoProvider.h"
+
+namespace android {
+
+class AudioPolicyClientInterface;
+
+class AudioSession : public RefBase, public AudioSessionInfoUpdateListener
+{
+public:
+    AudioSession(audio_session_t session,
+                 audio_source_t inputSource,
+                 audio_format_t format,
+                 uint32_t sampleRate,
+                 audio_channel_mask_t channelMask,
+                 audio_input_flags_t flags,
+                 uid_t uid,
+                 bool isSoundTrigger,
+                 AudioMix* policyMix,
+                 AudioPolicyClientInterface *clientInterface);
+
+    status_t dump(int fd, int spaces, int index) const;
+
+    audio_session_t session() const { return mSession; }
+    audio_source_t inputSource() const { return mInputSource; }
+    audio_format_t format() const { return mConfig.format; }
+    uint32_t sampleRate() const { return mConfig.sample_rate; }
+    audio_channel_mask_t channelMask() const { return mConfig.channel_mask; }
+    audio_input_flags_t flags() const { return mFlags; }
+    uid_t uid() const { return mUid; }
+    bool matches(const sp<AudioSession> &other) const;
+    bool isSoundTrigger() const { return mIsSoundTrigger; }
+    uint32_t openCount() const { return mOpenCount; }
+    uint32_t activeCount() const { return mActiveCount; }
+
+    uint32_t changeOpenCount(int delta);
+    uint32_t changeActiveCount(int delta);
+
+    void setInfoProvider(AudioSessionInfoProvider *provider);
+    // implementation of AudioSessionInfoUpdateListener
+    virtual void onSessionInfoUpdate() const;
+
+private:
+    const audio_session_t mSession;
+    const audio_source_t mInputSource;
+    const struct audio_config_base mConfig;
+    const audio_input_flags_t mFlags;
+    const uid_t mUid;
+    bool  mIsSoundTrigger;
+    uint32_t  mOpenCount;
+    uint32_t  mActiveCount;
+    AudioMix* mPolicyMix; // non NULL when used by a dynamic policy
+    AudioPolicyClientInterface* mClientInterface;
+    const AudioSessionInfoProvider* mInfoProvider;
+};
+
+class AudioSessionCollection :
+    public DefaultKeyedVector<audio_session_t, sp<AudioSession> >,
+    public AudioSessionInfoUpdateListener
+{
+public:
+    status_t addSession(audio_session_t session,
+                             const sp<AudioSession>& audioSession,
+                             AudioSessionInfoProvider *provider);
+
+    status_t removeSession(audio_session_t session);
+
+    uint32_t getOpenCount() const;
+
+    AudioSessionCollection getActiveSessions() const;
+    bool hasActiveSession() const;
+    bool isSourceActive(audio_source_t source) const;
+
+    // implementation of AudioSessionInfoUpdateListener
+    virtual void onSessionInfoUpdate() const;
+
+    status_t dump(int fd, int spaces) const;
+};
+
+}; // namespace android
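The counters at the heart of AudioSession are reference counts:
changeOpenCount()/changeActiveCount() clamp at zero, and crossing the 0 <-> 1
activity boundary is the event of interest (e.g. for dynamic-policy mix
notifications). A standalone sketch of that contract (stand-in type; the real
methods presumably also log a warning on underflow):

    #include <cstdint>
    #include <cstdio>

    struct SessionCounts {
        uint32_t active = 0;

        uint32_t changeActive(int delta) {
            if (delta < 0 && static_cast<uint32_t>(-delta) > active) {
                active = 0;  // clamp, mirroring the underflow path
            } else {
                active += delta;
            }
            if ((active == 1 && delta > 0) || (active == 0 && delta < 0)) {
                std::printf("session became %s\n", active ? "active" : "inactive");
            }
            return active;
        }
    };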
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h b/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
new file mode 100644
index 0000000..e0037fc
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+
+namespace android {
+
+/**
+ * Interface for input descriptors to implement so dependent audio sessions can query information
+ * about their context
+ */
+class AudioSessionInfoProvider
+{
+public:
+    virtual ~AudioSessionInfoProvider() {};
+
+    virtual audio_config_base_t getConfig() const = 0;
+
+    virtual audio_patch_handle_t getPatchHandle() const = 0;
+
+};
+
+class AudioSessionInfoUpdateListener
+{
+public:
+    virtual ~AudioSessionInfoUpdateListener() {};
+
+    virtual void onSessionInfoUpdate() const = 0;
+};
+
+} // namespace android
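The pairing above is a plain observer pattern: an input descriptor (the
provider) owns the config and patch handle, and pushes onSessionInfoUpdate()
to attached sessions when either changes. A standalone sketch (stand-in types,
not the real descriptor classes):

    #include <vector>

    struct InfoUpdateListener {
        virtual ~InfoUpdateListener() {}
        virtual void onSessionInfoUpdate() const = 0;
    };

    struct InputInfoProvider {
        int patchHandle = -1;
        std::vector<const InfoUpdateListener *> listeners;

        void setPatchHandle(int handle) {
            patchHandle = handle;
            for (const auto *l : listeners) {
                l->onSessionInfoUpdate();  // fan the change out to sessions
            }
        }
    };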
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
new file mode 100644
index 0000000..4ab7cf0
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <RoutingStrategy.h>
+#include <AudioPatch.h>
+
+namespace android {
+
+class SwAudioOutputDescriptor;
+class HwAudioOutputDescriptor;
+class DeviceDescriptor;
+
+class AudioSourceDescriptor: public RefBase
+{
+public:
+    AudioSourceDescriptor(const sp<DeviceDescriptor> device, const audio_attributes_t *attributes,
+                          uid_t uid) :
+        mDevice(device), mAttributes(*attributes), mUid(uid) {}
+    virtual ~AudioSourceDescriptor() {}
+
+    audio_patch_handle_t getHandle() const { return mPatchDesc->mHandle; }
+
+    status_t    dump(int fd);
+
+    const sp<DeviceDescriptor> mDevice;
+    const audio_attributes_t mAttributes;
+    uid_t mUid;
+    sp<AudioPatch> mPatchDesc;
+    wp<SwAudioOutputDescriptor> mSwOutput;
+    wp<HwAudioOutputDescriptor> mHwOutput;
+};
+
+class AudioSourceCollection :
+        public DefaultKeyedVector< audio_io_handle_t, sp<AudioSourceDescriptor> >
+{
+public:
+    status_t dump(int fd) const;
+};
+
+}; // namespace android
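An AudioSourceDescriptor pins together everything needed to (re-)route an
external source: the device, the caller's attributes and uid, and, once routed,
the patch plus the output feeding it. A hypothetical usage sketch with stand-in
types (the FM-tuner scenario is illustrative, not taken from this patch):

    #include <memory>
    #include <string>

    struct Device { unsigned type; };
    struct Patch  { int handle = -1; };

    struct SourceDesc {
        std::shared_ptr<Device> device;
        std::string usage;  // stand-in for audio_attributes_t
        int uid;
        std::shared_ptr<Patch> patch;
    };

    SourceDesc startFmSource(int callerUid) {
        auto fmTuner = std::make_shared<Device>(Device{/*AUDIO_DEVICE_IN_FM_TUNER*/ 0});
        return SourceDesc{fmTuner, "media", callerUid, std::make_shared<Patch>()};
    }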
diff --git a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
index 78d2cdf..ee95ceb 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
@@ -16,6 +16,7 @@
 
 #pragma once
 
+#include "AudioPolicyConfig.h"
 #include "DeviceDescriptor.h"
 #include "HwModule.h"
 #include "audio_policy_conf.h"
@@ -33,243 +34,26 @@
 // Definitions for audio_policy.conf file parsing
 // ----------------------------------------------------------------------------
 
-struct StringToEnum {
-    const char *name;
-    uint32_t value;
-};
-
-// TODO: move to a separate file. Should be in sync with audio.h.
-#define STRING_TO_ENUM(string) { #string, (uint32_t)string } // uint32_t cast removes warning
-#define NAME_TO_ENUM(name, value) { name, value }
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-#endif
-
-const StringToEnum sDeviceTypeToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_HDMI),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_LINE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPDIF),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_FM),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_IP),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_AMBIENT),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_HDMI),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_LINE),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_SPDIF),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_IP),
-};
-
-const StringToEnum sDeviceNameToEnumTable[] = {
-    NAME_TO_ENUM("Earpiece", AUDIO_DEVICE_OUT_EARPIECE),
-    NAME_TO_ENUM("Speaker", AUDIO_DEVICE_OUT_SPEAKER),
-    NAME_TO_ENUM("Speaker Protected", AUDIO_DEVICE_OUT_SPEAKER_SAFE),
-    NAME_TO_ENUM("Wired Headset", AUDIO_DEVICE_OUT_WIRED_HEADSET),
-    NAME_TO_ENUM("Wired Headphones", AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
-    NAME_TO_ENUM("BT SCO", AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
-    NAME_TO_ENUM("BT SCO Headset", AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
-    NAME_TO_ENUM("BT SCO Car Kit", AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
-    NAME_TO_ENUM("", AUDIO_DEVICE_OUT_ALL_SCO),
-    NAME_TO_ENUM("BT A2DP Out", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
-    NAME_TO_ENUM("BT A2DP Headphones", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
-    NAME_TO_ENUM("BT A2DP Speaker", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
-    NAME_TO_ENUM("", AUDIO_DEVICE_OUT_ALL_A2DP),
-    NAME_TO_ENUM("HDMI Out", AUDIO_DEVICE_OUT_AUX_DIGITAL),
-    NAME_TO_ENUM("HDMI Out", AUDIO_DEVICE_OUT_HDMI),
-    NAME_TO_ENUM("Analog Dock Out", AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
-    NAME_TO_ENUM("Digital Dock Out", AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
-    NAME_TO_ENUM("USB Host Out", AUDIO_DEVICE_OUT_USB_ACCESSORY),
-    NAME_TO_ENUM("USB Device Out", AUDIO_DEVICE_OUT_USB_DEVICE),
-    NAME_TO_ENUM("", AUDIO_DEVICE_OUT_ALL_USB),
-    NAME_TO_ENUM("Reroute Submix Out", AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
-    NAME_TO_ENUM("Telephony Tx", AUDIO_DEVICE_OUT_TELEPHONY_TX),
-    NAME_TO_ENUM("Line Out", AUDIO_DEVICE_OUT_LINE),
-    NAME_TO_ENUM("HDMI ARC Out", AUDIO_DEVICE_OUT_HDMI_ARC),
-    NAME_TO_ENUM("S/PDIF Out", AUDIO_DEVICE_OUT_SPDIF),
-    NAME_TO_ENUM("FM transceiver Out", AUDIO_DEVICE_OUT_FM),
-    NAME_TO_ENUM("Aux Line Out", AUDIO_DEVICE_OUT_AUX_LINE),
-    NAME_TO_ENUM("IP Out", AUDIO_DEVICE_OUT_IP),
-    NAME_TO_ENUM("Ambient Mic", AUDIO_DEVICE_IN_AMBIENT),
-    NAME_TO_ENUM("Built-In Mic", AUDIO_DEVICE_IN_BUILTIN_MIC),
-    NAME_TO_ENUM("BT SCO Headset Mic", AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
-    NAME_TO_ENUM("", AUDIO_DEVICE_IN_ALL_SCO),
-    NAME_TO_ENUM("Wired Headset Mic", AUDIO_DEVICE_IN_WIRED_HEADSET),
-    NAME_TO_ENUM("HDMI In", AUDIO_DEVICE_IN_AUX_DIGITAL),
-    NAME_TO_ENUM("HDMI In", AUDIO_DEVICE_IN_HDMI),
-    NAME_TO_ENUM("Telephony Rx", AUDIO_DEVICE_IN_TELEPHONY_RX),
-    NAME_TO_ENUM("Telephony Rx", AUDIO_DEVICE_IN_VOICE_CALL),
-    NAME_TO_ENUM("Built-In Back Mic", AUDIO_DEVICE_IN_BACK_MIC),
-    NAME_TO_ENUM("Reroute Submix In", AUDIO_DEVICE_IN_REMOTE_SUBMIX),
-    NAME_TO_ENUM("Analog Dock In", AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
-    NAME_TO_ENUM("Digital Dock In", AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
-    NAME_TO_ENUM("USB Host In", AUDIO_DEVICE_IN_USB_ACCESSORY),
-    NAME_TO_ENUM("USB Device In", AUDIO_DEVICE_IN_USB_DEVICE),
-    NAME_TO_ENUM("FM Tuner In", AUDIO_DEVICE_IN_FM_TUNER),
-    NAME_TO_ENUM("TV Tuner In", AUDIO_DEVICE_IN_TV_TUNER),
-    NAME_TO_ENUM("Line In", AUDIO_DEVICE_IN_LINE),
-    NAME_TO_ENUM("S/PDIF In", AUDIO_DEVICE_IN_SPDIF),
-    NAME_TO_ENUM("BT A2DP In", AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
-    NAME_TO_ENUM("Loopback In", AUDIO_DEVICE_IN_LOOPBACK),
-    NAME_TO_ENUM("IP In", AUDIO_DEVICE_IN_IP),
-};
-
-const StringToEnum sOutputFlagNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_FAST),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_TTS),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_RAW),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
-};
-
-const StringToEnum sInputFlagNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_INPUT_FLAG_FAST),
-    STRING_TO_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
-    STRING_TO_ENUM(AUDIO_INPUT_FLAG_RAW),
-    STRING_TO_ENUM(AUDIO_INPUT_FLAG_SYNC),
-};
-
-const StringToEnum sFormatNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_16_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_32_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_FLOAT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
-    STRING_TO_ENUM(AUDIO_FORMAT_MP3),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_MAIN),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_LC),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_SSR),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_LTP),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_HE_V1),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_ERLC),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_LD),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_HE_V2),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_ELD),
-    STRING_TO_ENUM(AUDIO_FORMAT_VORBIS),
-    STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V1),
-    STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V2),
-    STRING_TO_ENUM(AUDIO_FORMAT_OPUS),
-    STRING_TO_ENUM(AUDIO_FORMAT_AC3),
-    STRING_TO_ENUM(AUDIO_FORMAT_E_AC3),
-    STRING_TO_ENUM(AUDIO_FORMAT_DTS),
-    STRING_TO_ENUM(AUDIO_FORMAT_DTS_HD),
-};
-
-const StringToEnum sOutChannelsNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_MONO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_QUAD),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
-};
-
-const StringToEnum sInChannelsNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_CHANNEL_IN_MONO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_IN_STEREO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
-};
-
-const StringToEnum sIndexChannelsNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_1),
-    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_2),
-    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_3),
-    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_4),
-    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_5),
-    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_6),
-    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_7),
-    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_8),
-};
-
-const StringToEnum sGainModeNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_GAIN_MODE_JOINT),
-    STRING_TO_ENUM(AUDIO_GAIN_MODE_CHANNELS),
-    STRING_TO_ENUM(AUDIO_GAIN_MODE_RAMP),
-};
-
 class ConfigParsingUtils
 {
 public:
-    static uint32_t stringToEnum(const struct StringToEnum *table,
-            size_t size,
-            const char *name);
-    static const char *enumToString(const struct StringToEnum *table,
-            size_t size,
-            uint32_t value);
-    static bool stringToBool(const char *value);
-    static uint32_t parseOutputFlagNames(char *name);
-    static uint32_t parseInputFlagNames(char *name);
-    static audio_devices_t parseDeviceNames(char *name);
-
-    static void loadHwModules(cnode *root, HwModuleCollection &hwModules,
-                              DeviceVector &availableInputDevices,
-                              DeviceVector &availableOutputDevices,
-                              sp<DeviceDescriptor> &defaultOutputDevices,
-                              bool &isSpeakerDrcEnabled);
-
-    static void loadGlobalConfig(cnode *root, const sp<HwModule>& module,
-                                 DeviceVector &availableInputDevices,
-                                 DeviceVector &availableOutputDevices,
-                                 sp<DeviceDescriptor> &defaultOutputDevices,
-                                 bool &isSpeakerDrcEnabled);
-
-    static status_t loadAudioPolicyConfig(const char *path,
-                                          HwModuleCollection &hwModules,
-                                          DeviceVector &availableInputDevices,
-                                          DeviceVector &availableOutputDevices,
-                                          sp<DeviceDescriptor> &defaultOutputDevices,
-                                          bool &isSpeakerDrcEnabled);
+    static status_t loadConfig(const char *path, AudioPolicyConfig &config);
 
 private:
-    static void loadHwModule(cnode *root, HwModuleCollection &hwModules,
-                             DeviceVector &availableInputDevices,
-                             DeviceVector &availableOutputDevices,
-                             sp<DeviceDescriptor> &defaultOutputDevices,
-                             bool &isSpeakerDrcEnabled);
+    static void loadAudioPortGain(cnode *root, AudioPort &audioPort, int index);
+    static void loadAudioPortGains(cnode *root, AudioPort &audioPort);
+    static void loadDeviceDescriptorGains(cnode *root, sp<DeviceDescriptor> &deviceDesc);
+    static status_t loadHwModuleDevice(cnode *root, DeviceVector &devices);
+    static status_t loadHwModuleProfile(cnode *root, sp<HwModule> &module, audio_port_role_t role);
+    static void loadDevicesFromTag(const char *tag, DeviceVector &devices,
+                            const DeviceVector &declaredDevices);
+    static void loadHwModules(cnode *root, HwModuleCollection &hwModules,
+                              AudioPolicyConfig &config);
+    static void loadGlobalConfig(cnode *root, AudioPolicyConfig &config,
+                                 const sp<HwModule> &primaryModule);
+    static void loadModuleGlobalConfig(cnode *root, const sp<HwModule> &module,
+                                       AudioPolicyConfig &config);
+    static status_t loadHwModule(cnode *root, sp<HwModule> &module, AudioPolicyConfig &config);
 };
 
 }; // namespace android
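This collapses the old five-out-parameter loadAudioPolicyConfig() into a single
loadConfig(path, config) entry point. An assumed call site, inferred from the
AudioPolicyConfig members shown at the top of this patch (in-tree headers
required; not literal code from this commit):

    #include "AudioPolicyConfig.h"
    #include "ConfigParsingUtils.h"
    #include "audio_policy_conf.h"  // AUDIO_POLICY_(VENDOR_)CONFIG_FILE

    status_t initPolicyConfig(AudioPolicyConfig &config) {
        if (ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, config) != NO_ERROR &&
            ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, config) != NO_ERROR) {
            // Fall back to the built-in primary module (see setDefault() above).
            config.setDefault();
        }
        return NO_ERROR;
    }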
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index c42ece6..ed2450c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -29,10 +29,15 @@
 class DeviceDescriptor : public AudioPort, public AudioPortConfig
 {
 public:
-    DeviceDescriptor(audio_devices_t type);
+    // Note that an empty tag name refers, by convention, to a generic device.
+    DeviceDescriptor(audio_devices_t type, const String8 &tagName = String8(""));
 
     virtual ~DeviceDescriptor() {}
 
+    virtual const String8 getTagName() const { return mTagName; }
+
+    audio_devices_t type() const { return mDeviceType; }
+
     bool equals(const sp<DeviceDescriptor>& other) const;
 
     // AudioPortConfig
@@ -42,50 +47,44 @@
 
     // AudioPort
     virtual void attach(const sp<HwModule>& module);
-    virtual void loadGains(cnode *root);
     virtual void toAudioPort(struct audio_port *port) const;
     virtual void importAudioPort(const sp<AudioPort> port);
 
     audio_port_handle_t getId() const;
-    audio_devices_t type() const { return mDeviceType; }
-    status_t dump(int fd, int spaces, int index) const;
+    status_t dump(int fd, int spaces, int index, bool verbose = true) const;
     void log() const;
 
-    String8 mTag;
     String8 mAddress;
 
 private:
+    String8 mTagName; // Unique human-readable identifier for a device port found in the conf file.
     audio_devices_t     mDeviceType;
     audio_port_handle_t mId;
 
 friend class DeviceVector;
 };
 
-class DeviceVector : public SortedVector< sp<DeviceDescriptor> >
+class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
 {
 public:
     DeviceVector() : SortedVector(), mDeviceTypes(AUDIO_DEVICE_NONE) {}
 
     ssize_t add(const sp<DeviceDescriptor>& item);
+    void add(const DeviceVector &devices);
     ssize_t remove(const sp<DeviceDescriptor>& item);
     ssize_t indexOf(const sp<DeviceDescriptor>& item) const;
 
     audio_devices_t types() const { return mDeviceTypes; }
 
-    void loadDevicesFromType(audio_devices_t types);
-    void loadDevicesFromTag(char *tag, const DeviceVector& declaredDevices);
-
     sp<DeviceDescriptor> getDevice(audio_devices_t type, String8 address) const;
     DeviceVector getDevicesFromType(audio_devices_t types) const;
     sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
-    sp<DeviceDescriptor> getDeviceFromTag(const String8& tag) const;
+    sp<DeviceDescriptor> getDeviceFromTagName(const String8 &tagName) const;
     DeviceVector getDevicesFromTypeAddr(audio_devices_t type, String8 address) const;
 
     audio_devices_t getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
 
-    audio_policy_dev_state_t getDeviceConnectionState(const sp<DeviceDescriptor> &devDesc) const;
-
-    status_t dump(int fd, const String8 &direction) const;
+    status_t dump(int fd, const String8 &tag, int spaces = 0, bool verbose = true) const;
 
 private:
     void refreshTypes();
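Devices are now looked up by the unique tag name declared in the configuration
file rather than through the old public mTag field. A standalone sketch of the
lookup (stand-in types):

    #include <memory>
    #include <string>
    #include <vector>

    struct Device {
        std::string tagName;  // e.g. "Speaker"; unique within the config file
        unsigned type;
    };

    std::shared_ptr<Device> getDeviceFromTagName(
            const std::vector<std::shared_ptr<Device>> &devices,
            const std::string &tagName) {
        for (const auto &d : devices) {
            if (d->tagName == tagName) return d;
        }
        return nullptr;
    }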
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index c9783a1..ab650c0 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -55,8 +55,9 @@
 private:
     status_t setEffectEnabled(const sp<EffectDescriptor> &effectDesc, bool enabled);
 
-    uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects
-    uint32_t mTotalEffectsMemory;  // current memory used by effects
+    uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects (in MIPS)
+    uint32_t mTotalEffectsMemory;  // current memory used by effects (in KB)
+    uint32_t mTotalEffectsMemoryMaxUsed; // maximum memory used by effects (in KB)
 
     /**
      * Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units
diff --git a/services/audiopolicy/enginedefault/src/Gains.h b/services/audiopolicy/common/managerdefinitions/include/Gains.h
similarity index 87%
rename from services/audiopolicy/enginedefault/src/Gains.h
rename to services/audiopolicy/common/managerdefinitions/include/Gains.h
index 7620b7d..34afc8c 100644
--- a/services/audiopolicy/enginedefault/src/Gains.h
+++ b/services/audiopolicy/common/managerdefinitions/include/Gains.h
@@ -29,12 +29,7 @@
 class Gains
 {
 public :
-    static float volIndexToAmpl(Volume::device_category deviceCategory,
-                                const StreamDescriptor& streamDesc,
-                                int indexInUi);
-
-    static float volIndexToDb(Volume::device_category deviceCategory,
-                              const StreamDescriptor& streamDesc,
+    static float volIndexToDb(const VolumeCurvePoint *point, int indexMin, int indexMax,
                               int indexInUi);
 
     // default volume curve
@@ -58,7 +53,7 @@
     static const VolumeCurvePoint sSilentVolumeCurve[Volume::VOLCNT];
     static const VolumeCurvePoint sFullScaleVolumeCurve[Volume::VOLCNT];
     // default volume curves per stream and device category. See initializeVolumeCurves()
-    static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][Volume::DEVICE_CATEGORY_CNT];
+    static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
 };
 
 }; // namespace android
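The new volIndexToDb() signature passes the curve points and index bounds
explicitly instead of a StreamDescriptor. The mapping itself is the
long-standing piecewise-linear one: normalize the UI index onto the curve's own
index scale, then interpolate between the two surrounding points. A standalone
sketch (stand-in type; curve indices assumed strictly increasing, and exact
clamping details may differ from the real implementation):

    struct CurvePoint { int index; float dB; };  // stand-in for VolumeCurvePoint

    float volIndexToDb(const CurvePoint *curve, int nPoints,
                       int indexMin, int indexMax, int indexInUi) {
        if (nPoints < 2 || indexMax <= indexMin) return curve[0].dB;
        // normalize the UI index onto the curve's [first..last] index range
        int nbSteps = 1 + curve[nPoints - 1].index - curve[0].index;
        int volIdx = (nbSteps * (indexInUi - indexMin)) / (indexMax - indexMin);
        // find the segment containing volIdx and interpolate linearly
        for (int i = 1; i < nPoints; i++) {
            if (volIdx <= curve[i].index) {
                float span = float(curve[i].index - curve[i - 1].index);
                float frac = float(volIdx - curve[i - 1].index) / span;
                return curve[i - 1].dB + frac * (curve[i].dB - curve[i - 1].dB);
            }
        }
        return curve[nPoints - 1].dB;
    }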
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 92c3ea2..dd2993d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -17,26 +17,54 @@
 #pragma once
 
 #include "DeviceDescriptor.h"
+#include "AudioRoute.h"
+#include <hardware/audio.h>
 #include <utils/RefBase.h>
 #include <utils/String8.h>
 #include <utils/Errors.h>
 #include <utils/Vector.h>
 #include <system/audio.h>
 #include <cutils/config_utils.h>
+#include <string>
 
 namespace android {
 
 class IOProfile;
+class InputProfile;
+class OutputProfile;
+
+typedef Vector<sp<IOProfile> > InputProfileCollection;
+typedef Vector<sp<IOProfile> > OutputProfileCollection;
+typedef Vector<sp<IOProfile> > IOProfileCollection;
 
 class HwModule : public RefBase
 {
 public:
-    HwModule(const char *name);
+    HwModule(const char *name, uint32_t halVersion = AUDIO_DEVICE_API_VERSION_MIN);
     ~HwModule();
 
-    status_t loadOutput(cnode *root);
-    status_t loadInput(cnode *root);
-    status_t loadDevice(cnode *root);
+    const char *getName() const { return mName.string(); }
+
+    const DeviceVector &getDeclaredDevices() const { return mDeclaredDevices; }
+    void setDeclaredDevices(const DeviceVector &devices);
+
+    const InputProfileCollection &getInputProfiles() const { return mInputProfiles; }
+
+    const OutputProfileCollection &getOutputProfiles() const { return mOutputProfiles; }
+
+    void setProfiles(const IOProfileCollection &profiles);
+
+    void setHalVersion(uint32_t halVersion) { mHalVersion = halVersion; }
+    uint32_t getHalVersion() const { return mHalVersion; }
+
+    sp<DeviceDescriptor> getRouteSinkDevice(const sp<AudioRoute> &route) const;
+    DeviceVector getRouteSourceDevices(const sp<AudioRoute> &route) const;
+    void setRoutes(const AudioRouteVector &routes);
+
+    status_t addOutputProfile(const sp<IOProfile> &profile);
+    status_t addInputProfile(const sp<IOProfile> &profile);
+    status_t addProfile(const sp<IOProfile> &profile);
 
     status_t addOutputProfile(String8 name, const audio_config_t *config,
             audio_devices_t device, String8 address);
@@ -47,26 +75,39 @@
 
     audio_module_handle_t getHandle() const { return mHandle; }
 
+    sp<AudioPort> findPortByTagName(const String8 &tagName) const
+    {
+        return mPorts.findByTagName(tagName);
+    }
+
+    // TODO remove from here (split serialization)
     void dump(int fd);
 
-    const char *const        mName; // base name of the audio HW module (primary, a2dp ...)
-    uint32_t                 mHalVersion; // audio HAL API version
-    audio_module_handle_t    mHandle;
-    Vector < sp<IOProfile> > mOutputProfiles; // output profiles exposed by this module
-    Vector < sp<IOProfile> > mInputProfiles;  // input profiles exposed by this module
-    DeviceVector             mDeclaredDevices; // devices declared in audio_policy.conf
+    const String8 mName; // base name of the audio HW module (primary, a2dp ...)
+    audio_module_handle_t mHandle;
+    OutputProfileCollection mOutputProfiles; // output profiles exposed by this module
+    InputProfileCollection mInputProfiles;  // input profiles exposed by this module
+
+private:
+    void refreshSupportedDevices();
+
+    uint32_t mHalVersion; // audio HAL API version
+    DeviceVector mDeclaredDevices; // devices declared in audio_policy configuration file.
+    AudioRouteVector mRoutes;
+    AudioPortVector mPorts;
 };
 
-class HwModuleCollection : public Vector< sp<HwModule> >
+class HwModuleCollection : public Vector<sp<HwModule> >
 {
 public:
     sp<HwModule> getModuleFromName(const char *name) const;
 
-    sp <HwModule> getModuleForDevice(audio_devices_t device) const;
+    sp<HwModule> getModuleForDevice(audio_devices_t device) const;
 
-    sp<DeviceDescriptor>  getDeviceDescriptor(const audio_devices_t device,
-                                              const char *device_address,
-                                              const char *device_name) const;
+    sp<DeviceDescriptor> getDeviceDescriptor(const audio_devices_t device,
+                                             const char *device_address,
+                                             const char *device_name,
+                                             bool matchAddress = true) const;
 
     status_t dump(int fd) const;
 };
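With routes and ports now attached to the module, resolving a route's endpoints
is a matter of filtering by port kind: getRouteSinkDevice()/getRouteSourceDevices()
keep only the device ports (mix ports are I/O profiles, not devices). A
standalone sketch of the source side (stand-in types):

    #include <memory>
    #include <vector>

    enum class PortKind { Device, Mix };
    struct Port  { PortKind kind; };
    struct Route { std::vector<std::shared_ptr<Port>> sources; std::shared_ptr<Port> sink; };

    std::vector<std::shared_ptr<Port>> routeSourceDevices(const Route &route) {
        std::vector<std::shared_ptr<Port>> devices;
        for (const auto &src : route.sources) {
            if (src->kind == PortKind::Device) {
                devices.push_back(src);  // mix-port sources are not devices
            }
        }
        return devices;
    }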
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index ab6fcc1..dd20e93 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -33,10 +33,13 @@
 class IOProfile : public AudioPort
 {
 public:
-    IOProfile(const String8& name, audio_port_role_t role);
-    virtual ~IOProfile();
+    IOProfile(const String8 &name, audio_port_role_t role)
+        : AudioPort(name, AUDIO_PORT_TYPE_MIX, role) {}
 
-    // This method is used for both output and input.
+    // For a Profile aka MixPort, tag name and name are equivalent.
+    virtual const String8 getTagName() const { return getName(); }
+
+    // This method is used for input and direct output, and is not used for other outputs.
     // If parameter updatedSamplingRate is non-NULL, it is assigned the actual sample rate.
     // For input, flags is interpreted as audio_input_flags_t.
     // TODO: merge audio_output_flags_t and audio_input_flags_t.
@@ -53,8 +56,67 @@
     void dump(int fd);
     void log();
 
-    DeviceVector  mSupportedDevices; // supported devices
-                                     // (devices this output can be routed to)
+    bool hasSupportedDevices() const { return !mSupportedDevices.isEmpty(); }
+
+    bool supportDevice(audio_devices_t device) const
+    {
+        if (audio_is_output_devices(device)) {
+            return mSupportedDevices.types() & device;
+        }
+        return mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN);
+    }
+
+    bool supportDeviceAddress(const String8 &address) const
+    {
+        return mSupportedDevices[0]->mAddress == address;
+    }
+
+    // choose the first device type present in mSupportedDevices that is also part of deviceType
+    audio_devices_t getSupportedDeviceForType(audio_devices_t deviceType) const
+    {
+        for (size_t k = 0; k < mSupportedDevices.size(); k++) {
+            audio_devices_t profileType = mSupportedDevices[k]->type();
+            if (profileType & deviceType) {
+                return profileType;
+            }
+        }
+        return AUDIO_DEVICE_NONE;
+    }
+
+    audio_devices_t getSupportedDevicesType() const { return mSupportedDevices.types(); }
+
+    void clearSupportedDevices() { mSupportedDevices.clear(); }
+    void addSupportedDevice(const sp<DeviceDescriptor> &device)
+    {
+        mSupportedDevices.add(device);
+    }
+
+    void setSupportedDevices(const DeviceVector &devices)
+    {
+        mSupportedDevices = devices;
+    }
+
+    sp<DeviceDescriptor> getSupportedDeviceByAddress(audio_devices_t type, String8 address) const
+    {
+        return mSupportedDevices.getDevice(type, address);
+    }
+
+    const DeviceVector &getSupportedDevices() const { return mSupportedDevices; }
+
+private:
+    DeviceVector mSupportedDevices; // supported devices: this input/output can be routed from/to
+};
+
+class InputProfile : public IOProfile
+{
+public:
+    InputProfile(const String8 &name) : IOProfile(name, AUDIO_PORT_ROLE_SINK) {}
+};
+
+class OutputProfile : public IOProfile
+{
+public:
+    OutputProfile(const String8 &name) : IOProfile(name, AUDIO_PORT_ROLE_SOURCE) {}
 };
 
 }; // namespace android
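The bit test in supportDevice() above is worth spelling out: input device types
carry AUDIO_DEVICE_BIT_IN (0x80000000), so the query side is masked before the
bitwise-overlap test; the low bits still distinguish the device. A standalone
sketch (constants are the audio.h values of this era):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t DEVICE_BIT_IN         = 0x80000000u; // AUDIO_DEVICE_BIT_IN
    constexpr uint32_t DEVICE_IN_BUILTIN_MIC = DEVICE_BIT_IN | 0x4u;

    bool supportDevice(uint32_t supportedTypes, uint32_t device, bool isOutputDevice) {
        if (isOutputDevice) {
            return (supportedTypes & device) != 0;
        }
        // mask the IN bit off the query; the low bits still overlap
        return (supportedTypes & (device & ~DEVICE_BIT_IN)) != 0;
    }

    int main() {
        assert(supportDevice(DEVICE_IN_BUILTIN_MIC, DEVICE_IN_BUILTIN_MIC, false));
    }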
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
new file mode 100644
index 0000000..a3de686
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <Volume.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+class IVolumeCurvesCollection
+{
+public:
+    virtual void clearCurrentVolumeIndex(audio_stream_type_t stream) = 0;
+    virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device,
+                                       int index) = 0;
+    virtual bool canBeMuted(audio_stream_type_t stream) = 0;
+    virtual int getVolumeIndexMin(audio_stream_type_t stream) const = 0;
+    virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device) = 0;
+    virtual int getVolumeIndexMax(audio_stream_type_t stream) const = 0;
+    virtual float volIndexToDb(audio_stream_type_t stream, device_category device,
+                               int indexInUi) const = 0;
+    virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) = 0;
+
+    virtual void initializeVolumeCurves(bool /*isSpeakerDrcEnabled*/) {}
+    virtual void switchVolumeCurve(audio_stream_type_t src, audio_stream_type_t dst) = 0;
+    virtual void restoreOriginVolumeCurve(audio_stream_type_t stream)
+    {
+        switchVolumeCurve(stream, stream);
+    }
+    virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
+                                         audio_devices_t device) const = 0;
+
+    virtual status_t dump(int fd) const = 0;
+
+protected:
+    virtual ~IVolumeCurvesCollection() {}
+};
+
+}; // namespace android
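
A sketch of the intended call pattern against this interface: a client can temporarily make one stream follow another stream's curves, and the default restoreOriginVolumeCurve() undoes it by re-installing the stream's own curves. Illustrative only; the concrete collections appear later in this patch:

    // Illustrative only: borrow streamSrc's curves for streamDst, then restore.
    static void borrowCurves(IVolumeCurvesCollection &curves,
                             audio_stream_type_t streamSrc,
                             audio_stream_type_t streamDst)
    {
        curves.switchVolumeCurve(streamSrc, streamDst);
        // ... streamDst volumes are now computed from streamSrc's curves ...
        curves.restoreOriginVolumeCurve(streamDst); // == switchVolumeCurve(dst, dst)
    }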
diff --git a/services/audiopolicy/common/managerdefinitions/include/Serializer.h b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
new file mode 100644
index 0000000..078b582
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "AudioPolicyConfig.h"
+#include <utils/StrongPointer.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <string>
+#include <sstream>
+#include <fstream>
+
+struct _xmlNode;
+struct _xmlDoc;
+
+namespace android {
+
+struct AudioGainTraits
+{
+    static const char *const tag;
+    static const char *const collectionTag;
+
+    struct Attributes
+    {
+        static const char mode[]; /**< gain modes supported, e.g. AUDIO_GAIN_MODE_CHANNELS. */
+        /** controlled channels, needed if mode AUDIO_GAIN_MODE_CHANNELS. */
+        static const char channelMask[];
+        static const char minValueMB[]; /**< min value in millibel. */
+        static const char maxValueMB[]; /**< max value in millibel. */
+        static const char defaultValueMB[]; /**< default value in millibel. */
+        static const char stepValueMB[]; /**< step value in millibel. */
+        static const char minRampMs[]; /**< needed if mode AUDIO_GAIN_MODE_RAMP. */
+        static const char maxRampMs[]; /**< needed if mode AUDIO_GAIN_MODE_RAMP. */
+    };
+
+    typedef AudioGain Element;
+    typedef sp<Element> PtrElement;
+    typedef AudioGainCollection Collection;
+    typedef void *PtrSerializingCtx;
+
+    static status_t deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
+                                PtrSerializingCtx serializingContext);
+
+    // Gain has no child
+};
+
+// A profile section contains a name, one audio format and the list of supported sampling rates
+// and channel masks for this format
+struct AudioProfileTraits
+{
+    static const char *const tag;
+    static const char *const collectionTag;
+
+    struct Attributes
+    {
+        static const char name[];
+        static const char samplingRates[];
+        static const char format[];
+        static const char channelMasks[];
+    };
+
+    typedef AudioProfile Element;
+    typedef sp<AudioProfile> PtrElement;
+    typedef AudioProfileVector Collection;
+    typedef void *PtrSerializingCtx;
+
+    static status_t deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
+                                PtrSerializingCtx serializingContext);
+};
+
+struct MixPortTraits
+{
+    static const char *const tag;
+    static const char *const collectionTag;
+
+    struct Attributes
+    {
+        static const char name[];
+        static const char role[];
+        static const char flags[];
+    };
+
+    typedef IOProfile Element;
+    typedef sp<Element> PtrElement;
+    typedef IOProfileCollection Collection;
+    typedef void *PtrSerializingCtx;
+
+    static status_t deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
+                                PtrSerializingCtx serializingContext);
+
+    // Children are: GainTraits
+};
+
+struct DevicePortTraits
+{
+    static const char *const tag;
+    static const char *const collectionTag;
+
+    struct Attributes
+    {
+        static const char tagName[]; /**< <device tag name>: any string without spaces. */
+        static const char type[]; /**< <device type>. */
+        static const char role[]; /**< <device role: sink or source>. */
+        static const char roleSource[]; /**< <attribute role source value>. */
+        static const char address[]; /**< optional: device address, char string less than 64. */
+    };
+    typedef DeviceDescriptor Element;
+    typedef sp<DeviceDescriptor> PtrElement;
+    typedef DeviceVector Collection;
+    typedef void *PtrSerializingCtx;
+
+    static status_t deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
+                                PtrSerializingCtx serializingContext);
+    // Children are: GainTraits (optional)
+};
+
+struct RouteTraits
+{
+    static const char *const tag;
+    static const char *const collectionTag;
+
+    struct Attributes
+    {
+        static const char type[]; /**< <route type>: mix or mux. */
+        static const char typeMix[]; /**< type attribute mix value. */
+        static const char sink[]; /**< <sink: involved in this route>. */
+        static const char sources[]; /**< sources: all sources that can be involved in this route. */
+    };
+    typedef AudioRoute Element;
+    typedef sp<AudioRoute> PtrElement;
+    typedef AudioRouteVector Collection;
+    typedef HwModule *PtrSerializingCtx;
+
+    static status_t deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
+                                PtrSerializingCtx ctx);
+};
+
+struct ModuleTraits
+{
+    static const char *const tag;
+    static const char *const collectionTag;
+
+    static const char *const childAttachedDevicesTag;
+    static const char *const childAttachedDeviceTag;
+    static const char *const childDefaultOutputDeviceTag;
+
+    struct Attributes
+    {
+        static const char name[];
+        static const char version[];
+    };
+
+    typedef HwModule Element;
+    typedef sp<Element> PtrElement;
+    typedef HwModuleCollection Collection;
+    typedef AudioPolicyConfig *PtrSerializingCtx;
+
+    static status_t deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
+                                PtrSerializingCtx serializingContext);
+
+    // Children are: mixPortTraits, devicePortTraits and routeTraits
+    // Need to call deserialize on each child
+};
+
+struct GlobalConfigTraits
+{
+    static const char *const tag;
+
+    struct Attributes
+    {
+        static const char speakerDrcEnabled[];
+    };
+
+    static status_t deserialize(const _xmlNode *root, AudioPolicyConfig &config);
+};
+
+struct VolumeTraits
+{
+    static const char *const tag;
+    static const char *const collectionTag;
+    static const char *const volumePointTag;
+
+    struct Attributes
+    {
+        static const char stream[];
+        static const char deviceCategory[];
+        static const char reference[];
+    };
+
+    typedef VolumeCurve Element;
+    typedef sp<VolumeCurve> PtrElement;
+    typedef VolumeCurvesCollection Collection;
+    typedef void *PtrSerializingCtx;
+
+    static status_t deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
+                                PtrSerializingCtx serializingContext);
+
+    // No Child
+};
+
+class PolicySerializer
+{
+private:
+    static const char *const rootName;
+
+    static const char *const versionAttribute;
+    static const uint32_t gMajor; /**< the major number of the policy xml format version. */
+    static const uint32_t gMinor; /**< the minor number of the policy xml format version. */
+
+public:
+    PolicySerializer();
+    status_t deserialize(const char *str, AudioPolicyConfig &config);
+
+private:
+    typedef AudioPolicyConfig Element;
+
+    std::string mRootElementName;
+    std::string mVersion;
+
+    // Children are: ModulesTraits, VolumeTraits
+};
+
+}; // namespace android
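
Every Traits struct above exposes the same typedefs plus a static deserialize(), so the serializer can walk any collection with one generic helper. A sketch of that pattern, assuming libxml2 node accessors are available in the .cpp; the shipped implementation lives in Serializer.cpp and may differ:

    // Sketch of the traits-driven pattern: visit children named Trait::tag and
    // append each successfully deserialized element to the collection.
    template <class Trait>
    static status_t deserializeCollection(_xmlDoc *doc, const _xmlNode *root,
                                          typename Trait::Collection &collection,
                                          typename Trait::PtrSerializingCtx ctx)
    {
        for (const _xmlNode *child = root->children; child != NULL; child = child->next) {
            if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar *>(Trait::tag))) {
                typename Trait::PtrElement element;
                if (Trait::deserialize(doc, child, element, ctx) == NO_ERROR) {
                    collection.add(element);
                }
            }
        }
        return NO_ERROR;
    }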
diff --git a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
index 84db5ab..424df84 100644
--- a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
@@ -16,7 +16,7 @@
 
 #pragma once
 
-#include <Volume.h>
+#include "IVolumeCurvesCollection.h"
 #include <utils/KeyedVector.h>
 #include <utils/StrongPointer.h>
 #include <utils/SortedVector.h>
@@ -38,17 +38,22 @@
     int getVolumeIndexMax() const { return mIndexMax; }
     void setVolumeIndexMin(int volIndexMin);
     void setVolumeIndexMax(int volIndexMax);
+    bool hasVolumeIndexForDevice(audio_devices_t device) const
+    {
+        device = Volume::getDeviceForVolume(device);
+        return mIndexCur.indexOfKey(device) >= 0;
+    }
 
     void dump(int fd) const;
 
-    void setVolumeCurvePoint(Volume::device_category deviceCategory, const VolumeCurvePoint *point);
-    const VolumeCurvePoint *getVolumeCurvePoint(Volume::device_category deviceCategory) const
+    void setVolumeCurvePoint(device_category deviceCategory, const VolumeCurvePoint *point);
+    const VolumeCurvePoint *getVolumeCurvePoint(device_category deviceCategory) const
     {
         return mVolumeCurve[deviceCategory];
     }
 
 private:
-    const VolumeCurvePoint *mVolumeCurve[Volume::DEVICE_CATEGORY_CNT];
+    const VolumeCurvePoint *mVolumeCurve[DEVICE_CATEGORY_CNT];
     KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
     int mIndexMin; /**< min volume index. */
     int mIndexMax; /**< max volume index. */
@@ -58,28 +63,48 @@
 /**
  * stream descriptors collection for volume control
  */
-class StreamDescriptorCollection : public DefaultKeyedVector<audio_stream_type_t, StreamDescriptor>
+class StreamDescriptorCollection : public DefaultKeyedVector<audio_stream_type_t, StreamDescriptor>,
+                                   public IVolumeCurvesCollection
 {
 public:
     StreamDescriptorCollection();
 
-    void clearCurrentVolumeIndex(audio_stream_type_t stream);
-    void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device, int index);
+    virtual void clearCurrentVolumeIndex(audio_stream_type_t stream);
+    virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device,
+                                       int index);
+    virtual bool canBeMuted(audio_stream_type_t stream);
+    virtual int getVolumeIndexMin(audio_stream_type_t stream) const
+    {
+        return valueFor(stream).getVolumeIndexMin();
+    }
+    virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device)
+    {
+        return valueFor(stream).getVolumeIndex(device);
+    }
+    virtual int getVolumeIndexMax(audio_stream_type_t stream) const
+    {
+        return valueFor(stream).getVolumeIndexMax();
+    }
+    virtual float volIndexToDb(audio_stream_type_t stream, device_category device,
+                               int indexInUi) const;
+    virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
+    virtual void initializeVolumeCurves(bool isSpeakerDrcEnabled);
+    virtual void switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst);
+    virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
+                                         audio_devices_t device) const
+    {
+        return valueFor(stream).hasVolumeIndexForDevice(device);
+    }
 
-    bool canBeMuted(audio_stream_type_t stream);
+    virtual status_t dump(int fd) const;
 
-    status_t dump(int fd) const;
-
-    void setVolumeCurvePoint(audio_stream_type_t stream,
-                             Volume::device_category deviceCategory,
+private:
+    void setVolumeCurvePoint(audio_stream_type_t stream, device_category deviceCategory,
                              const VolumeCurvePoint *point);
-
     const VolumeCurvePoint *getVolumeCurvePoint(audio_stream_type_t stream,
-                                                Volume::device_category deviceCategory) const;
-
+                                                device_category deviceCategory) const;
     void setVolumeIndexMin(audio_stream_type_t stream,int volIndexMin);
     void setVolumeIndexMax(audio_stream_type_t stream,int volIndexMax);
-
 };
 
 }; // namespace android
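
Since StreamDescriptorCollection now implements IVolumeCurvesCollection, policy code can be written against the interface and work with either this legacy conf-file backend or the XML-based VolumeCurvesCollection added below. A trivial sketch:

    // Sketch: callers depend on the interface, not on the concrete backend.
    static float computeVolumeDb(const IVolumeCurvesCollection &curves,
                                 audio_stream_type_t stream,
                                 device_category category, int indexInUi)
    {
        return curves.volIndexToDb(stream, category, indexInUi);
    }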
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
new file mode 100644
index 0000000..b828f81
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "policy.h"
+#include <Volume.h>
+#include <system/audio.h>
+#include <convert/convert.h>
+#include <utils/Log.h>
+#include <string>
+#include <utils/Vector.h>
+#include <utils/SortedVector.h>
+
+namespace android {
+
+struct SampleRateTraits
+{
+    typedef uint32_t Type;
+    typedef SortedVector<Type> Collection;
+};
+struct DeviceTraits
+{
+    typedef audio_devices_t Type;
+    typedef Vector<Type> Collection;
+};
+struct OutputFlagTraits
+{
+    typedef audio_output_flags_t Type;
+    typedef Vector<Type> Collection;
+};
+struct InputFlagTraits
+{
+    typedef audio_input_flags_t Type;
+    typedef Vector<Type> Collection;
+};
+struct FormatTraits
+{
+    typedef audio_format_t Type;
+    typedef Vector<Type> Collection;
+};
+struct ChannelTraits
+{
+    typedef audio_channel_mask_t Type;
+    typedef SortedVector<Type> Collection;
+};
+struct OutputChannelTraits : public ChannelTraits {};
+struct InputChannelTraits : public ChannelTraits {};
+struct ChannelIndexTraits : public ChannelTraits {};
+struct GainModeTraits
+{
+    typedef audio_gain_mode_t Type;
+    typedef Vector<Type> Collection;
+};
+struct StreamTraits
+{
+  typedef audio_stream_type_t Type;
+  typedef Vector<Type> Collection;
+};
+struct DeviceCategoryTraits
+{
+  typedef device_category Type;
+  typedef Vector<Type> Collection;
+};
+template <typename T>
+struct DefaultTraits
+{
+  typedef T Type;
+  typedef Vector<Type> Collection;
+};
+
+template <class Traits>
+static void collectionFromString(const std::string &str, typename Traits::Collection &collection,
+                                 const char *del = "|")
+{
+    char *literal = strdup(str.c_str());
+    for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+        typename Traits::Type value;
+        if (utilities::convertTo<std::string, typename Traits::Type >(cstr, value)) {
+            collection.add(value);
+        }
+    }
+    free(literal);
+}
+
+template <class Traits>
+class TypeConverter
+{
+public:
+    static bool toString(const typename Traits::Type &value, std::string &str);
+
+    static bool fromString(const std::string &str, typename Traits::Type &result);
+
+    static void collectionFromString(const std::string &str,
+                                     typename Traits::Collection &collection,
+                                     const char *del = "|");
+
+    static uint32_t maskFromString(const std::string &str, const char *del = "|");
+
+protected:
+    struct Table {
+        const char *literal;
+        typename Traits::Type value;
+    };
+
+    static const Table mTable[];
+    static const size_t mSize;
+};
+
+typedef TypeConverter<DeviceTraits> DeviceConverter;
+typedef TypeConverter<OutputFlagTraits> OutputFlagConverter;
+typedef TypeConverter<InputFlagTraits> InputFlagConverter;
+typedef TypeConverter<FormatTraits> FormatConverter;
+typedef TypeConverter<OutputChannelTraits> OutputChannelConverter;
+typedef TypeConverter<InputChannelTraits> InputChannelConverter;
+typedef TypeConverter<ChannelIndexTraits> ChannelIndexConverter;
+typedef TypeConverter<GainModeTraits> GainModeConverter;
+typedef TypeConverter<StreamTraits> StreamTypeConverter;
+typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
+
+static SampleRateTraits::Collection samplingRatesFromString(const std::string &samplingRates,
+                                                            const char *del = "|")
+{
+    SampleRateTraits::Collection samplingRateCollection;
+    collectionFromString<SampleRateTraits>(samplingRates, samplingRateCollection, del);
+    return samplingRateCollection;
+}
+
+static FormatTraits::Collection formatsFromString(const std::string &formats, const char *del = "|")
+{
+    FormatTraits::Collection formatCollection;
+    FormatConverter::collectionFromString(formats, formatCollection, del);
+    return formatCollection;
+}
+
+static audio_format_t formatFromString(const std::string &literalFormat)
+{
+    audio_format_t format;
+    if (literalFormat.empty()) {
+        return gDynamicFormat;
+    }
+    FormatConverter::fromString(literalFormat, format);
+    return format;
+}
+
+static audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
+{
+    audio_channel_mask_t channels;
+    if (!OutputChannelConverter::fromString(literalChannels, channels) &&
+            !InputChannelConverter::fromString(literalChannels, channels)) {
+        return AUDIO_CHANNEL_INVALID; // neither an output nor an input mask literal
+    }
+    return channels;
+}
+
+static ChannelTraits::Collection channelMasksFromString(const std::string &channels,
+                                                        const char *del = "|")
+{
+    ChannelTraits::Collection channelMaskCollection;
+    OutputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
+    InputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
+    ChannelIndexConverter::collectionFromString(channels, channelMaskCollection, del);
+    return channelMaskCollection;
+}
+
+static InputChannelTraits::Collection inputChannelMasksFromString(const std::string &inChannels,
+                                                                  const char *del = "|")
+{
+    InputChannelTraits::Collection inputChannelMaskCollection;
+    InputChannelConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
+    ChannelIndexConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
+    return inputChannelMaskCollection;
+}
+
+static OutputChannelTraits::Collection outputChannelMasksFromString(const std::string &outChannels,
+                                                                    const char *del = "|")
+{
+    OutputChannelTraits::Collection outputChannelMaskCollection;
+    OutputChannelConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
+    ChannelIndexConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
+    return outputChannelMaskCollection;
+}
+
+}; // namespace android
+
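
A usage sketch for the converters; the literal-to-value tables live in TypeConverter.cpp, so the accepted strings shown here are assumptions:

    // Sketch: parsing "|"-separated literals as they appear in the policy XML.
    std::string literals("AUDIO_DEVICE_OUT_SPEAKER|AUDIO_DEVICE_OUT_WIRED_HEADSET");

    // As a bit mask: OR of every literal the table resolves.
    audio_devices_t deviceMask =
            static_cast<audio_devices_t>(DeviceConverter::maskFromString(literals));

    // As a collection: one entry per resolved literal.
    DeviceTraits::Collection devices;
    DeviceConverter::collectionFromString(literals, devices);

    // Single-value conversion reports failure through the return value.
    audio_format_t format;
    if (!FormatConverter::fromString("AUDIO_FORMAT_PCM_16_BIT", format)) {
        ALOGE("unknown format literal");
    }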
diff --git a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
new file mode 100644
index 0000000..10f0766
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "IVolumeCurvesCollection.h"
+#include <policy.h>
+#include <hardware/audio.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/SortedVector.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+#include <string>
+#include <utility>
+
+namespace android {
+
+struct CurvePoint
+{
+    CurvePoint() {}
+    CurvePoint(int index, int attenuationInMb) :
+        mIndex(index), mAttenuationInMb(attenuationInMb) {}
+    uint32_t mIndex;
+    int mAttenuationInMb;
+};
+
+inline bool operator< (const CurvePoint &lhs, const CurvePoint &rhs)
+{
+    return lhs.mIndex < rhs.mIndex;
+}
+
+// A volume curve for a given use case and device category
+// It contains a list of points expressing the attenuation in millibels for
+// a given volume index from 0 to 100
+class VolumeCurve : public RefBase
+{
+public:
+    VolumeCurve(device_category device, audio_stream_type_t stream) :
+        mDeviceCategory(device), mStreamType(stream) {}
+
+    device_category getDeviceCategory() const { return mDeviceCategory; }
+    audio_stream_type_t getStreamType() const { return mStreamType; }
+
+    void add(const CurvePoint &point) { mCurvePoints.add(point); }
+
+    float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
+
+    void dump(int fd) const;
+
+private:
+    SortedVector<CurvePoint> mCurvePoints;
+    device_category mDeviceCategory;
+    audio_stream_type_t mStreamType;
+};
+
+// Volume Curves for a given use case indexed by device category
+class VolumeCurvesForStream : public KeyedVector<device_category, sp<VolumeCurve> >
+{
+public:
+    VolumeCurvesForStream() : mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
+    {
+        mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
+    }
+
+    sp<VolumeCurve> getCurvesFor(device_category device) const
+    {
+        if (indexOfKey(device) < 0) {
+            return 0;
+        }
+        return valueFor(device);
+    }
+
+    int getVolumeIndex(audio_devices_t device) const
+    {
+        device = Volume::getDeviceForVolume(device);
+        // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
+        if (mIndexCur.indexOfKey(device) < 0) {
+            device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
+        }
+        return mIndexCur.valueFor(device);
+    }
+
+    bool canBeMuted() const { return mCanBeMuted; }
+    void clearCurrentVolumeIndex() { mIndexCur.clear(); }
+    void addCurrentVolumeIndex(audio_devices_t device, int index) { mIndexCur.add(device, index); }
+
+    void setVolumeIndexMin(int volIndexMin) { mIndexMin = volIndexMin; }
+    int getVolumeIndexMin() const { return mIndexMin; }
+
+    void setVolumeIndexMax(int volIndexMax) { mIndexMax = volIndexMax; }
+    int getVolumeIndexMax() const { return mIndexMax; }
+
+    bool hasVolumeIndexForDevice(audio_devices_t device) const
+    {
+        device = Volume::getDeviceForVolume(device);
+        return mIndexCur.indexOfKey(device) >= 0;
+    }
+
+    const sp<VolumeCurve> getOriginVolumeCurve(device_category deviceCategory) const
+    {
+        ALOG_ASSERT(mOriginVolumeCurves.indexOfKey(deviceCategory) >= 0, "Invalid device category");
+        return mOriginVolumeCurves.valueFor(deviceCategory);
+    }
+    void setVolumeCurve(device_category deviceCategory, const sp<VolumeCurve> &volumeCurve)
+    {
+        ALOG_ASSERT(indexOfKey(deviceCategory) >= 0, "Invalid device category for Volume Curve");
+        replaceValueFor(deviceCategory, volumeCurve);
+    }
+
+    ssize_t add(const sp<VolumeCurve> &volumeCurve)
+    {
+        device_category deviceCategory = volumeCurve->getDeviceCategory();
+        ssize_t index = indexOfKey(deviceCategory);
+        if (index < 0) {
+            // Keep track of original Volume Curves per device category in order to switch curves.
+            mOriginVolumeCurves.add(deviceCategory, volumeCurve);
+            return KeyedVector::add(deviceCategory, volumeCurve);
+        }
+        return index;
+    }
+
+    float volIndexToDb(device_category deviceCat, int indexInUi) const
+    {
+        return getCurvesFor(deviceCat)->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
+    }
+
+    void dump(int fd, int spaces, bool curvePoints = false) const;
+
+private:
+    KeyedVector<device_category, sp<VolumeCurve> > mOriginVolumeCurves;
+    KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
+    int mIndexMin; /**< min volume index. */
+    int mIndexMax; /**< max volume index. */
+    bool mCanBeMuted; /**< true if the stream can be muted. */
+};
+
+// Collection of Volume Curves indexed by use case
+class VolumeCurvesCollection : public KeyedVector<audio_stream_type_t, VolumeCurvesForStream>,
+                               public IVolumeCurvesCollection
+{
+public:
+    VolumeCurvesCollection()
+    {
+        // Create an empty collection of curves
+        for (ssize_t i = 0; i < AUDIO_STREAM_CNT; i++) {
+            audio_stream_type_t stream = static_cast<audio_stream_type_t>(i);
+            KeyedVector::add(stream, VolumeCurvesForStream());
+        }
+    }
+
+    // Once the XML has been parsed, this must be called first to sanity-check the table and initialize indexes
+    virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
+    {
+        editValueAt(stream).setVolumeIndexMin(indexMin);
+        editValueAt(stream).setVolumeIndexMax(indexMax);
+        return NO_ERROR;
+    }
+    virtual void clearCurrentVolumeIndex(audio_stream_type_t stream)
+    {
+        editCurvesFor(stream).clearCurrentVolumeIndex();
+    }
+    virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device, int index)
+    {
+        editCurvesFor(stream).addCurrentVolumeIndex(device, index);
+    }
+    virtual bool canBeMuted(audio_stream_type_t stream) { return getCurvesFor(stream).canBeMuted(); }
+
+    virtual int getVolumeIndexMin(audio_stream_type_t stream) const
+    {
+        return getCurvesFor(stream).getVolumeIndexMin();
+    }
+    virtual int getVolumeIndexMax(audio_stream_type_t stream) const
+    {
+        return getCurvesFor(stream).getVolumeIndexMax();
+    }
+    virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device)
+    {
+        return getCurvesFor(stream).getVolumeIndex(device);
+    }
+    virtual void switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
+    {
+        const VolumeCurvesForStream &sourceCurves = getCurvesFor(streamSrc);
+        VolumeCurvesForStream &dstCurves = editCurvesFor(streamDst);
+        ALOG_ASSERT(sourceCurves.size() == dstCurves.size(), "device category not aligned");
+        for (size_t index = 0; index < sourceCurves.size(); index++) {
+            device_category cat = sourceCurves.keyAt(index);
+            dstCurves.setVolumeCurve(cat, sourceCurves.getOriginVolumeCurve(cat));
+        }
+    }
+    virtual float volIndexToDb(audio_stream_type_t stream, device_category cat, int indexInUi) const
+    {
+        return getCurvesFor(stream).volIndexToDb(cat, indexInUi);
+    }
+    virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
+                                         audio_devices_t device) const
+    {
+        return getCurvesFor(stream).hasVolumeIndexForDevice(device);
+    }
+
+    virtual status_t dump(int fd) const;
+
+    ssize_t add(const sp<VolumeCurve> &volumeCurve)
+    {
+        audio_stream_type_t streamType = volumeCurve->getStreamType();
+        return editCurvesFor(streamType).add(volumeCurve);
+    }
+    VolumeCurvesForStream &editCurvesFor(audio_stream_type_t stream)
+    {
+        ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
+        return editValueAt(stream);
+    }
+    const VolumeCurvesForStream &getCurvesFor(audio_stream_type_t stream) const
+    {
+        ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
+        return valueFor(stream);
+    }
+};
+
+}; // namespace android
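
volIndexToDb() is implemented in VolumeCurve.cpp, not shown in this patch; conceptually it maps the UI index into [volIndexMin, volIndexMax], normalizes it onto the curve's 0..100 axis, and interpolates between the two surrounding points. An illustrative piecewise-linear version, not the shipped code:

    // Illustrative only: linear interpolation between curve points.
    static float volIndexToDbSketch(const SortedVector<CurvePoint> &points,
                                    int indexInUi, int indexMin, int indexMax)
    {
        if (points.size() < 2 || indexMax <= indexMin) {
            return 0.0f; // degenerate curve: no attenuation
        }
        int normalized = ((indexInUi - indexMin) * 100) / (indexMax - indexMin);
        uint32_t pos = normalized < 0 ? 0 : (uint32_t)normalized;
        if (pos <= points[0].mIndex) {
            return points[0].mAttenuationInMb / 100.0f;
        }
        if (pos >= points[points.size() - 1].mIndex) {
            return points[points.size() - 1].mAttenuationInMb / 100.0f;
        }
        size_t i = 0;
        while (pos > points[i + 1].mIndex) {
            i++;
        }
        const CurvePoint &lo = points[i];
        const CurvePoint &hi = points[i + 1];
        float frac = float(pos - lo.mIndex) / float(hi.mIndex - lo.mIndex);
        float mb = lo.mAttenuationInMb + frac * (hi.mAttenuationInMb - lo.mAttenuationInMb);
        return mb / 100.0f; // millibels to dB
    }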
diff --git a/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h b/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
index a393e3b..0a27947 100644
--- a/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
+++ b/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
@@ -47,10 +47,6 @@
 #define DEVICES_TAG "devices"
 #define FLAGS_TAG "flags"
 
-#define DYNAMIC_VALUE_TAG "dynamic" // special value for "channel_masks", "sampling_rates" and
-                                    // "formats" in outputs descriptors indicating that supported
-                                    // values should be queried after opening the output.
-
 #define APM_DEVICES_TAG "devices"
 #define APM_DEVICE_TYPE "type"
 #define APM_DEVICE_ADDRESS "address"
@@ -69,3 +65,7 @@
 #define GAIN_STEP_VALUE "step_value_mB"
 #define GAIN_MIN_RAMP_MS "min_ramp_ms"
 #define GAIN_MAX_RAMP_MS "max_ramp_ms"
+
+#define DYNAMIC_VALUE_TAG "dynamic" // special value for "channel_masks", "sampling_rates" and
+                                    // "formats" in outputs descriptors indicating that supported
+                                    // values should be queried after opening the output.
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
new file mode 100644
index 0000000..635fe4d
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioCollections"
+//#define LOG_NDEBUG 0
+
+#include "AudioCollections.h"
+#include "AudioPort.h"
+#include "AudioRoute.h"
+#include "HwModule.h"
+#include "AudioGain.h"
+
+namespace android {
+
+sp<AudioPort> AudioPortVector::findByTagName(const String8 &tagName) const
+{
+    sp<AudioPort> port = 0;
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->getTagName() == tagName) {
+            port = itemAt(i);
+            break;
+        }
+    }
+    return port;
+}
+
+status_t AudioRouteVector::dump(int fd, int spaces) const
+{
+    if (isEmpty()) {
+        return NO_ERROR;
+    }
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\n%*sAudio Routes (%zu):\n", spaces, "", size());
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "%*s- Route %zu:\n", spaces, "", i + 1);
+        write(fd, buffer, strlen(buffer));
+        itemAt(i)->dump(fd, 4);
+    }
+    return NO_ERROR;
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
index fc7b0cc..e454941 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
@@ -25,7 +25,6 @@
 #endif
 
 #include "AudioGain.h"
-#include "StreamDescriptor.h"
 #include <utils/Log.h>
 #include <utils/String8.h>
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 626fdae..6dacaa4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -28,13 +28,11 @@
 
 AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
     : mIoHandle(0),
-      mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL), mPatchHandle(0), mRefCount(0),
-      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false), mId(0)
+      mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
+      mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0)
 {
     if (profile != NULL) {
-        mSamplingRate = profile->pickSamplingRate();
-        mFormat = profile->pickFormat();
-        mChannelMask = profile->pickChannelMask();
+        profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
         if (profile->mGains.size() > 0) {
             profile->mGains[0]->getDefaultConfig(&mGain);
         }
@@ -50,16 +48,28 @@
 audio_module_handle_t AudioInputDescriptor::getModuleHandle() const
 {
     if (mProfile == 0) {
-        return 0;
+        return AUDIO_MODULE_HANDLE_NONE;
     }
     return mProfile->getModuleHandle();
 }
 
+uint32_t AudioInputDescriptor::getOpenRefCount() const
+{
+    return mSessions.getOpenCount();
+}
+
 audio_port_handle_t AudioInputDescriptor::getId() const
 {
     return mId;
 }
 
+audio_source_t AudioInputDescriptor::inputSource() const
+{
+    // TODO: return highest priority input source
+    return mSessions.size() > 0 ?
+            mSessions.valueAt(0)->inputSource() : AUDIO_SOURCE_DEFAULT;
+}
+
 void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
                                              const struct audio_port_config *srcConfig) const
 {
@@ -78,7 +88,7 @@
     dstConfig->type = AUDIO_PORT_TYPE_MIX;
     dstConfig->ext.mix.hw_module = getModuleHandle();
     dstConfig->ext.mix.handle = mIoHandle;
-    dstConfig->ext.mix.usecase.source = mInputSource;
+    dstConfig->ext.mix.usecase.source = inputSource();
 }
 
 void AudioInputDescriptor::toAudioPort(struct audio_port *port) const
@@ -113,6 +123,58 @@
     mPreemptedSessions.clear();
 }
 
+bool AudioInputDescriptor::isActive() const {
+    return mSessions.hasActiveSession();
+}
+
+bool AudioInputDescriptor::isSourceActive(audio_source_t source) const
+{
+    return mSessions.isSourceActive(source);
+}
+
+bool AudioInputDescriptor::isSoundTrigger() const {
+    // sound trigger and non sound trigger sessions are not mixed
+    // on a given input
+    return mSessions.valueAt(0)->isSoundTrigger();
+}
+
+sp<AudioSession> AudioInputDescriptor::getAudioSession(
+                                              audio_session_t session) const {
+    return mSessions.valueFor(session);
+}
+
+AudioSessionCollection AudioInputDescriptor::getActiveAudioSessions() const
+{
+    return mSessions.getActiveSessions();
+}
+
+status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
+                         const sp<AudioSession>& audioSession) {
+    return mSessions.addSession(session, audioSession, /*AudioSessionInfoProvider*/this);
+}
+
+status_t AudioInputDescriptor::removeAudioSession(audio_session_t session) {
+    return mSessions.removeSession(session);
+}
+
+audio_patch_handle_t AudioInputDescriptor::getPatchHandle() const
+{
+    return mPatchHandle;
+}
+
+void AudioInputDescriptor::setPatchHandle(audio_patch_handle_t handle)
+{
+    mPatchHandle = handle;
+    mSessions.onSessionInfoUpdate();
+}
+
+audio_config_base_t AudioInputDescriptor::getConfig() const
+{
+    const audio_config_base_t config = { .sample_rate = mSamplingRate, .channel_mask = mChannelMask,
+            .format = mFormat };
+    return config;
+}
+
 status_t AudioInputDescriptor::dump(int fd)
 {
     const size_t SIZE = 256;
@@ -129,13 +191,11 @@
     result.append(buffer);
     snprintf(buffer, SIZE, " Devices %08x\n", mDevice);
     result.append(buffer);
-    snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Open Ref Count %d\n", mOpenRefCount);
-    result.append(buffer);
 
     write(fd, result.string(), result.size());
 
+    mSessions.dump(fd, 1);
+
     return NO_ERROR;
 }
 
@@ -143,10 +203,7 @@
 {
     for (size_t i = 0; i < size(); i++) {
         const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
-        if (inputDescriptor->mRefCount == 0) {
-            continue;
-        }
-        if (inputDescriptor->mInputSource == (int)source) {
+        if (inputDescriptor->isSourceActive(source)) {
             return true;
         }
     }
@@ -169,8 +226,8 @@
 {
     uint32_t count = 0;
     for (size_t i = 0; i < size(); i++) {
-        const sp<AudioInputDescriptor>  desc = valueAt(i);
-        if (desc->mRefCount > 0) {
+        const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
+        if (inputDescriptor->isActive()) {
             count++;
         }
     }
@@ -180,9 +237,10 @@
 audio_io_handle_t AudioInputCollection::getActiveInput(bool ignoreVirtualInputs)
 {
     for (size_t i = 0; i < size(); i++) {
-        const sp<AudioInputDescriptor>  input_descriptor = valueAt(i);
-        if ((input_descriptor->mRefCount > 0)
-                && (!ignoreVirtualInputs || !is_virtual_input_device(input_descriptor->mDevice))) {
+        const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
+        if ((inputDescriptor->isActive())
+                && (!ignoreVirtualInputs ||
+                    !is_virtual_input_device(inputDescriptor->mDevice))) {
             return keyAt(i);
         }
     }
@@ -192,7 +250,7 @@
 audio_devices_t AudioInputCollection::getSupportedDevices(audio_io_handle_t handle) const
 {
     sp<AudioInputDescriptor> inputDesc = valueFor(handle);
-    audio_devices_t devices = inputDesc->mProfile->mSupportedDevices.types();
+    audio_devices_t devices = inputDesc->mProfile->getSupportedDevicesType();
     return devices;
 }
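
With this change the descriptor no longer tracks mRefCount, mInputSource or mIsSoundTrigger itself; activity, source and open count are all derived from attached AudioSession objects (a class introduced elsewhere in this patch). A sketch of the resulting call pattern, using the method names above:

    // Sketch: input state now flows from the session collection.
    static void describeInput(const sp<AudioInputDescriptor> &input,
                              audio_session_t session,
                              const sp<AudioSession> &audioSession)
    {
        input->addAudioSession(session, audioSession);
        bool active = input->isActive();               // any attached session active?
        audio_source_t source = input->inputSource();  // first session's source
        uint32_t openCount = input->getOpenRefCount(); // aggregated session open count
        ALOGV("input active %d source %d openCount %u", active, source, openCount);
    }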
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index a278375..79bbc54 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -34,7 +34,7 @@
 AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
                                              AudioPolicyClientInterface *clientInterface)
     : mPort(port), mDevice(AUDIO_DEVICE_NONE),
-      mPatchHandle(0), mClientInterface(clientInterface), mId(0)
+      mClientInterface(clientInterface), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0)
 {
     // clear usage count for all stream types
     for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
@@ -47,9 +47,7 @@
         mStrategyMutedByDevice[i] = false;
     }
     if (port != NULL) {
-        mSamplingRate = port->pickSamplingRate();
-        mFormat = port->pickFormat();
-        mChannelMask = port->pickChannelMask();
+        port->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
         if (port->mGains.size() > 0) {
             port->mGains[0]->getDefaultConfig(&mGain);
         }
@@ -220,15 +218,15 @@
 }
 
 // SwAudioOutputDescriptor implementation
-SwAudioOutputDescriptor::SwAudioOutputDescriptor(
-        const sp<IOProfile>& profile, AudioPolicyClientInterface *clientInterface)
+SwAudioOutputDescriptor::SwAudioOutputDescriptor(const sp<IOProfile>& profile,
+                                                 AudioPolicyClientInterface *clientInterface)
     : AudioOutputDescriptor(profile, clientInterface),
     mProfile(profile), mIoHandle(0), mLatency(0),
     mFlags((audio_output_flags_t)0), mPolicyMix(NULL),
     mOutput1(0), mOutput2(0), mDirectOpenCount(0), mGlobalRefCount(0)
 {
     if (profile != NULL) {
-        mFlags = (audio_output_flags_t)profile->mFlags;
+        mFlags = (audio_output_flags_t)profile->getFlags();
     }
 }
 
@@ -283,7 +281,7 @@
     if (isDuplicated()) {
         return (audio_devices_t)(mOutput1->supportedDevices() | mOutput2->supportedDevices());
     } else {
-        return mProfile->mSupportedDevices.types() ;
+        return mProfile->getSupportedDevicesType();
     }
 }
 
@@ -317,14 +315,14 @@
     if ((oldGlobalRefCount == 0) && (mGlobalRefCount > 0)) {
         if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
         {
-            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
+            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
                     MIX_STATE_MIXING);
         }
 
     } else if ((oldGlobalRefCount > 0) && (mGlobalRefCount == 0)) {
         if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
         {
-            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
+            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
                     MIX_STATE_IDLE);
         }
     }
@@ -388,8 +386,64 @@
     return changed;
 }
 
-// SwAudioOutputCollection implementation
+// HwAudioOutputDescriptor implementation
+HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
+                                                 AudioPolicyClientInterface *clientInterface)
+    : AudioOutputDescriptor(source->mDevice, clientInterface),
+      mSource(source)
+{
+}
 
+status_t HwAudioOutputDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    AudioOutputDescriptor::dump(fd);
+
+    snprintf(buffer, SIZE, "Source:\n");
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+    mSource->dump(fd);
+
+    return NO_ERROR;
+}
+
+audio_devices_t HwAudioOutputDescriptor::supportedDevices()
+{
+    return mDevice;
+}
+
+void HwAudioOutputDescriptor::toAudioPortConfig(
+                                                 struct audio_port_config *dstConfig,
+                                                 const struct audio_port_config *srcConfig) const
+{
+    mSource->mDevice->toAudioPortConfig(dstConfig, srcConfig);
+}
+
+void HwAudioOutputDescriptor::toAudioPort(
+                                                    struct audio_port *port) const
+{
+    mSource->mDevice->toAudioPort(port);
+}
+
+
+bool HwAudioOutputDescriptor::setVolume(float volume,
+                                        audio_stream_type_t stream,
+                                        audio_devices_t device,
+                                        uint32_t delayMs,
+                                        bool force)
+{
+    bool changed = AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force);
+
+    if (changed) {
+      // TODO: use gain controller on source device if any to adjust volume
+    }
+    return changed;
+}
+
+// SwAudioOutputCollection implementation
 bool SwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
     nsecs_t sysTime = systemTime();
@@ -473,7 +527,7 @@
 audio_devices_t SwAudioOutputCollection::getSupportedDevices(audio_io_handle_t handle) const
 {
     sp<SwAudioOutputDescriptor> outputDesc = valueFor(handle);
-    audio_devices_t devices = outputDesc->mProfile->mSupportedDevices.types();
+    audio_devices_t devices = outputDesc->mProfile->getSupportedDevicesType();
     return devices;
 }
 
@@ -494,4 +548,49 @@
     return NO_ERROR;
 }
 
+// HwAudioOutputCollection implementation
+bool HwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    nsecs_t sysTime = systemTime();
+    for (size_t i = 0; i < this->size(); i++) {
+        const sp<HwAudioOutputDescriptor> outputDesc = this->valueAt(i);
+        if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool HwAudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const
+{
+    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
+        if (s == (size_t) streamToIgnore) {
+            continue;
+        }
+        for (size_t i = 0; i < size(); i++) {
+            const sp<HwAudioOutputDescriptor> outputDesc = valueAt(i);
+            if (outputDesc->mRefCount[s] != 0) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+status_t HwAudioOutputCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nOutputs dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- Output %d dump:\n", keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        valueAt(i)->dump(fd);
+    }
+
+    return NO_ERROR;
+}
+
 }; //namespace android
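
The global reference-count update above is edge-triggered: only the first client becoming active (0 -> >0) and the last one leaving (>0 -> 0) notify a dynamic policy mix. A condensed restatement of that logic:

    // Condensed sketch of the edge-triggered callback above; only count edges
    // produce a MIX_STATE update, never intermediate increments.
    static void notifyMixOnEdge(uint32_t oldCount, uint32_t newCount,
                                AudioPolicyClientInterface *client, AudioMix *mix)
    {
        if (mix == NULL || (mix->mCbFlags & AudioMix::kCbFlagNotifyActivity) == 0) {
            return;
        }
        if (oldCount == 0 && newCount > 0) {
            client->onDynamicPolicyMixStateUpdate(mix->mDeviceAddress, MIX_STATE_MIXING);
        } else if (oldCount > 0 && newCount == 0) {
            client->onDynamicPolicyMixStateUpdate(mix->mDeviceAddress, MIX_STATE_IDLE);
        }
    }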
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index a06d867..f382dec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -19,7 +19,7 @@
 
 #include "AudioPatch.h"
 #include "AudioGain.h"
-#include "ConfigParsingUtils.h"
+#include "TypeConverter.h"
 #include <cutils/log.h>
 #include <utils/String8.h>
 
@@ -31,7 +31,7 @@
     mHandle(static_cast<audio_patch_handle_t>(android_atomic_inc(&mNextUniqueId))),
     mPatch(*patch),
     mUid(uid),
-    mAfPatchHandle(0)
+    mAfPatchHandle(AUDIO_PATCH_HANDLE_NONE)
 {
 }
 
@@ -53,10 +53,11 @@
     result.append(buffer);
     for (size_t i = 0; i < mPatch.num_sources; i++) {
         if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
+            std::string device;
+            DeviceConverter::toString(mPatch.sources[i].ext.device.type, device);
             snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
-                     mPatch.sources[i].id, ConfigParsingUtils::enumToString(sDeviceTypeToEnumTable,
-                                                        ARRAY_SIZE(sDeviceTypeToEnumTable),
-                                                        mPatch.sources[i].ext.device.type));
+                     mPatch.sources[i].id,
+                     device.c_str());
         } else {
             snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
                      mPatch.sources[i].id, mPatch.sources[i].ext.mix.handle);
@@ -67,10 +68,11 @@
     result.append(buffer);
     for (size_t i = 0; i < mPatch.num_sinks; i++) {
         if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
+            std::string device;
+            DeviceConverter::toString(mPatch.sinks[i].ext.device.type, device);
             snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
-                     mPatch.sinks[i].id, ConfigParsingUtils::enumToString(sDeviceTypeToEnumTable,
-                                                        ARRAY_SIZE(sDeviceTypeToEnumTable),
-                                                        mPatch.sinks[i].ext.device.type));
+                     mPatch.sinks[i].id,
+                     device.c_str());
         } else {
             snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
                      mPatch.sinks[i].id, mPatch.sinks[i].ext.mix.handle);
@@ -126,14 +128,35 @@
 
     size_t patchesWritten = 0;
     size_t patchesMax = *num_patches;
-    for (size_t i = 0; i  < size() && patchesWritten < patchesMax; i++) {
-        const sp<AudioPatch>  patch = valueAt(i);
-        patches[patchesWritten] = patch->mPatch;
-        patches[patchesWritten++].id = patch->mHandle;
+    *num_patches = 0;
+    for (size_t patchIndex = 0; patchIndex < size(); patchIndex++) {
+        // do not report patches with AUDIO_DEVICE_IN_STUB as source or
+        // AUDIO_DEVICE_OUT_STUB as sink as those devices are used by stub HALs by convention
+        const sp<AudioPatch> patch = valueAt(patchIndex);
+        bool skip = false;
+        for (size_t srcIndex = 0; srcIndex < patch->mPatch.num_sources && !skip; srcIndex++) {
+            if (patch->mPatch.sources[srcIndex].type == AUDIO_PORT_TYPE_DEVICE &&
+                    patch->mPatch.sources[srcIndex].ext.device.type == AUDIO_DEVICE_IN_STUB) {
+                skip = true;
+            }
+        }
+        for (size_t sinkIndex = 0; sinkIndex < patch->mPatch.num_sinks && !skip; sinkIndex++) {
+            if (patch->mPatch.sinks[sinkIndex].type == AUDIO_PORT_TYPE_DEVICE &&
+                    patch->mPatch.sinks[sinkIndex].ext.device.type == AUDIO_DEVICE_OUT_STUB) {
+                skip = true;
+            }
+        }
+        if (skip) {
+            continue; // to next audio patch
+        }
+        if (patchesWritten < patchesMax) {
+            patches[patchesWritten] = patch->mPatch;
+            patches[patchesWritten++].id = patch->mHandle;
+        }
+        (*num_patches)++;
         ALOGV("listAudioPatches() patch %zu num_sources %d num_sinks %d",
-              i, patch->mPatch.num_sources, patch->mPatch.num_sinks);
+              patchIndex, patch->mPatch.num_sources, patch->mPatch.num_sinks);
     }
-    *num_patches = size();
 
     ALOGV("listAudioPatches() got %zu patches needed %d", patchesWritten, *num_patches);
     return NO_ERROR;
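
Note the contract established above: *num_patches is rewritten to the number of reportable (non-stub) patches even when the caller's array is smaller, so a caller can size a second query. A sketch of that two-call pattern (the collection type name is assumed):

    // Given: AudioPatchCollection patchCollection (name assumed).
    // Sketch: first count, then fetch. Passing NULL is safe when *num_patches
    // is 0 because the array is never written in that case.
    unsigned int count = 0;
    patchCollection.listAudioPatches(&count, NULL);
    Vector<struct audio_patch> patches;
    patches.resize(count);
    patchCollection.listAudioPatches(&count, patches.editArray());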
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 6f1998c..7ee98b6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "APM::AudioPolicyMix"
+#define LOG_TAG "APM_AudioPolicyMix"
 //#define LOG_NDEBUG 0
 
 #include "AudioPolicyMix.h"
@@ -51,7 +51,8 @@
     return &mMix;
 }
 
-status_t AudioPolicyMixCollection::registerMix(String8 address, AudioMix mix)
+status_t AudioPolicyMixCollection::registerMix(String8 address, AudioMix mix,
+                                               sp<SwAudioOutputDescriptor> desc)
 {
     ssize_t index = indexOfKey(address);
     if (index >= 0) {
@@ -61,6 +62,11 @@
     sp<AudioPolicyMix> policyMix = new AudioPolicyMix();
     policyMix->setMix(mix);
     add(address, policyMix);
+
+    if (desc != 0) {
+        desc->mPolicyMix = policyMix->getMix();
+        policyMix->setOutput(desc);
+    }
     return NO_ERROR;
 }
 
@@ -98,35 +104,118 @@
     }
 }
 
-status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes,
+status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes, uid_t uid,
                                                     sp<SwAudioOutputDescriptor> &desc)
 {
+    ALOGV("getOutputForAttr() querying %zu mixes:", size());
+    desc = 0;
     for (size_t i = 0; i < size(); i++) {
         sp<AudioPolicyMix> policyMix = valueAt(i);
         AudioMix *mix = policyMix->getMix();
 
         if (mix->mMixType == MIX_TYPE_PLAYERS) {
+            // TODO if adding more player rules (currently only 2), make rule handling "generic"
+            //      as there is no difference in the treatment of usage- or uid-based rules
+            bool hasUsageMatchRules = false;
+            bool hasUsageExcludeRules = false;
+            bool usageMatchFound = false;
+            bool usageExclusionFound = false;
+
+            bool hasUidMatchRules = false;
+            bool hasUidExcludeRules = false;
+            bool uidMatchFound = false;
+            bool uidExclusionFound = false;
+
+            bool hasAddrMatch = false;
+
+            // iterate over all mix criteria to list what rules this mix contains
             for (size_t j = 0; j < mix->mCriteria.size(); j++) {
-                if ((RULE_MATCH_ATTRIBUTE_USAGE == mix->mCriteria[j].mRule &&
-                     mix->mCriteria[j].mAttr.mUsage == attributes.usage) ||
-                        (RULE_EXCLUDE_ATTRIBUTE_USAGE == mix->mCriteria[j].mRule &&
-                         mix->mCriteria[j].mAttr.mUsage != attributes.usage)) {
-                    desc = policyMix->getOutput();
-                    break;
-                }
+                ALOGV(" getOutputForAttr: mix %zu: inspecting mix criteria %zu of %zu",
+                        i, j, mix->mCriteria.size());
+
+                // if there is an address match, prioritize that match
                 if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
                         strncmp(attributes.tags + strlen("addr="),
-                                mix->mRegistrationId.string(),
+                                mix->mDeviceAddress.string(),
                                 AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
-                    desc = policyMix->getOutput();
+                    hasAddrMatch = true;
                     break;
                 }
+
+                switch (mix->mCriteria[j].mRule) {
+                case RULE_MATCH_ATTRIBUTE_USAGE:
+                    ALOGV("\tmix has RULE_MATCH_ATTRIBUTE_USAGE for usage %d",
+                                                mix->mCriteria[j].mValue.mUsage);
+                    hasUsageMatchRules = true;
+                    if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
+                        // found one match against all allowed usages
+                        usageMatchFound = true;
+                    }
+                    break;
+                case RULE_EXCLUDE_ATTRIBUTE_USAGE:
+                    ALOGV("\tmix has RULE_EXCLUDE_ATTRIBUTE_USAGE for usage %d",
+                            mix->mCriteria[j].mValue.mUsage);
+                    hasUsageExcludeRules = true;
+                    if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
+                        // found this usage is to be excluded
+                        usageExclusionFound = true;
+                    }
+                    break;
+                case RULE_MATCH_UID:
+                    ALOGV("\tmix has RULE_MATCH_UID for uid %d", mix->mCriteria[j].mValue.mUid);
+                    hasUidMatchRules = true;
+                    if (mix->mCriteria[j].mValue.mUid == uid) {
+                        // found one UID match against all allowed UIDs
+                        uidMatchFound = true;
+                    }
+                    break;
+                case RULE_EXCLUDE_UID:
+                    ALOGV("\tmix has RULE_EXCLUDE_UID for uid %d", mix->mCriteria[j].mValue.mUid);
+                    hasUidExcludeRules = true;
+                    if (mix->mCriteria[j].mValue.mUid == uid) {
+                        // found this UID is to be excluded
+                        uidExclusionFound = true;
+                    }
+                    break;
+                default:
+                    break;
+                }
+
+                // consistency checks: for each "dimension" of rules (usage, uid, ...), a mix
+                // may contain only MATCH rules or only EXCLUDE rules, never a combination of both
+                if (hasUsageMatchRules && hasUsageExcludeRules) {
+                    ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_ATTRIBUTE_USAGE"
+                            " and RULE_EXCLUDE_ATTRIBUTE_USAGE in mix %zu", i);
+                    return BAD_VALUE;
+                }
+                if (hasUidMatchRules && hasUidExcludeRules) {
+                    ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_UID"
+                            " and RULE_EXCLUDE_UID in mix %zu", i);
+                    return BAD_VALUE;
+                }
+
+                if ((hasUsageExcludeRules && usageExclusionFound)
+                        || (hasUidExcludeRules && uidExclusionFound)) {
+                    break; // stop iterating on criteria because an exclusion was found (will fail)
+                }
+
+            } // end of iteration over mix criteria
+
+            // determine whether we exit on success (or fail implicitly, as desc stays 0)
+            if (hasAddrMatch ||
+                    !((hasUsageExcludeRules && usageExclusionFound) ||
+                      (hasUsageMatchRules && !usageMatchFound)  ||
+                      (hasUidExcludeRules && uidExclusionFound) ||
+                      (hasUidMatchRules && !uidMatchFound))) {
+                ALOGV("\tgetOutputForAttr will use mix %zu", i);
+                desc = policyMix->getOutput();
             }
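+            // Illustrative sketch (hypothetical criteria, not part of this change): a mix
+            // registered with
+            //     RULE_MATCH_ATTRIBUTE_USAGE / AUDIO_USAGE_MEDIA
+            //     RULE_EXCLUDE_UID / 10042
+            // accepts a MEDIA stream from uid 10001 (usage matched, uid not excluded),
+            // rejects a MEDIA stream from uid 10042 (uid exclusion found), and rejects an
+            // ALARM stream from any uid (usage match rule present but not matched).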
+
         } else if (mix->mMixType == MIX_TYPE_RECORDERS) {
             if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
                     strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
                     strncmp(attributes.tags + strlen("addr="),
-                            mix->mRegistrationId.string(),
+                            mix->mDeviceAddress.string(),
                             AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
                 desc = policyMix->getOutput();
             }
@@ -151,9 +240,9 @@
         }
         for (size_t j = 0; j < mix->mCriteria.size(); j++) {
             if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
-                    mix->mCriteria[j].mAttr.mSource == inputSource) ||
+                    mix->mCriteria[j].mValue.mSource == inputSource) ||
                (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
-                    mix->mCriteria[j].mAttr.mSource != inputSource)) {
+                    mix->mCriteria[j].mValue.mSource != inputSource)) {
                 if (availDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
                     if (policyMix != NULL) {
                         *policyMix = mix;
@@ -174,6 +263,15 @@
     }
     String8 address(attr.tags + strlen("addr="));
 
+#if !LOG_NDEBUG
+    ALOGV("getInputMixForAttr looking for address %s\n  mixes available:", address.string());
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioPolicyMix> policyMix = valueAt(i);
+        AudioMix *mix = policyMix->getMix();
+        ALOGV("\tmix %zu address=%s", i, mix->mDeviceAddress.string());
+    }
+#endif
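+    // Example (hypothetical address value): for an attribute with
+    // tags == "addr=bus0_media_out", address is "bus0_media_out" and the lookup
+    // below matches a mix registered with that device address.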
+
     ssize_t index = indexOfKey(address);
     if (index < 0) {
         ALOGW("getInputMixForAttr() no policy for address %s", address.string());
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 4e24f19..17ed537 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -16,33 +16,27 @@
 
 #define LOG_TAG "APM::AudioPort"
 //#define LOG_NDEBUG 0
-#include <media/AudioResamplerPublic.h>
+#include "TypeConverter.h"
 #include "AudioPort.h"
 #include "HwModule.h"
 #include "AudioGain.h"
-#include "ConfigParsingUtils.h"
-#include "audio_policy_conf.h"
 #include <policy.h>
 
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
 namespace android {
 
 int32_t volatile AudioPort::mNextUniqueId = 1;
 
 // --- AudioPort class implementation
-
-AudioPort::AudioPort(const String8& name, audio_port_type_t type,
-                     audio_port_role_t role) :
-    mName(name), mType(type), mRole(role), mFlags(0)
-{
-    mUseInChannelMask = ((type == AUDIO_PORT_TYPE_DEVICE) && (role == AUDIO_PORT_ROLE_SOURCE)) ||
-                    ((type == AUDIO_PORT_TYPE_MIX) && (role == AUDIO_PORT_ROLE_SINK));
-}
-
 void AudioPort::attach(const sp<HwModule>& module)
 {
     mModule = module;
 }
 
+// Note that this is a different ID namespace than AudioFlinger unique IDs
 audio_port_handle_t AudioPort::getNextUniqueId()
 {
     return static_cast<audio_port_handle_t>(android_atomic_inc(&mNextUniqueId));
@@ -51,7 +45,7 @@
 audio_module_handle_t AudioPort::getModuleHandle() const
 {
     if (mModule == 0) {
-        return 0;
+        return AUDIO_MODULE_HANDLE_NONE;
     }
     return mModule->mHandle;
 }
@@ -61,605 +55,178 @@
     if (mModule == 0) {
         return 0;
     }
-    return mModule->mHalVersion;
+    return mModule->getHalVersion();
 }
 
 const char *AudioPort::getModuleName() const
 {
     if (mModule == 0) {
-        return "";
+        return "invalid module";
     }
-    return mModule->mName;
+    return mModule->getName();
 }
 
 void AudioPort::toAudioPort(struct audio_port *port) const
 {
+    // TODO: update this function once audio_port structure reflects the new profile definition.
+    // For compatibility reasons: flatten the AudioProfiles into the audio_port structure.
+    SortedVector<audio_format_t> flatenedFormats;
+    SampleRateVector flatenedRates;
+    ChannelsVector flatenedChannels;
+    for (size_t profileIndex = 0; profileIndex < mProfiles.size(); profileIndex++) {
+        if (mProfiles[profileIndex]->isValid()) {
+            audio_format_t formatToExport = mProfiles[profileIndex]->getFormat();
+            const SampleRateVector &ratesToExport = mProfiles[profileIndex]->getSampleRates();
+            const ChannelsVector &channelsToExport = mProfiles[profileIndex]->getChannels();
+
+            if (flatenedFormats.indexOf(formatToExport) < 0) {
+                flatenedFormats.add(formatToExport);
+            }
+            for (size_t rateIndex = 0; rateIndex < ratesToExport.size(); rateIndex++) {
+                uint32_t rate = ratesToExport[rateIndex];
+                if (flatenedRates.indexOf(rate) < 0) {
+                    flatenedRates.add(rate);
+                }
+            }
+            for (size_t chanIndex = 0; chanIndex < channelsToExport.size(); chanIndex++) {
+                audio_channel_mask_t channels = channelsToExport[chanIndex];
+                if (flatenedChannels.indexOf(channels) < 0) {
+                    flatenedChannels.add(channels);
+                }
+            }
+            if (flatenedRates.size() > AUDIO_PORT_MAX_SAMPLING_RATES ||
+                    flatenedChannels.size() > AUDIO_PORT_MAX_CHANNEL_MASKS ||
+                    flatenedFormats.size() > AUDIO_PORT_MAX_FORMATS) {
+                ALOGE("%s: bailing out: cannot export profiles to port config", __FUNCTION__);
+                return;
+            }
+        }
+    }
     port->role = mRole;
     port->type = mType;
     strlcpy(port->name, mName, AUDIO_PORT_MAX_NAME_LEN);
-    unsigned int i;
-    for (i = 0; i < mSamplingRates.size() && i < AUDIO_PORT_MAX_SAMPLING_RATES; i++) {
-        if (mSamplingRates[i] != 0) {
-            port->sample_rates[i] = mSamplingRates[i];
-        }
+    port->num_sample_rates = flatenedRates.size();
+    port->num_channel_masks = flatenedChannels.size();
+    port->num_formats = flatenedFormats.size();
+    for (size_t i = 0; i < flatenedRates.size(); i++) {
+        port->sample_rates[i] = flatenedRates[i];
     }
-    port->num_sample_rates = i;
-    for (i = 0; i < mChannelMasks.size() && i < AUDIO_PORT_MAX_CHANNEL_MASKS; i++) {
-        if (mChannelMasks[i] != 0) {
-            port->channel_masks[i] = mChannelMasks[i];
-        }
+    for (size_t i = 0; i < flatenedChannels.size(); i++) {
+        port->channel_masks[i] = flatenedChannels[i];
     }
-    port->num_channel_masks = i;
-    for (i = 0; i < mFormats.size() && i < AUDIO_PORT_MAX_FORMATS; i++) {
-        if (mFormats[i] != 0) {
-            port->formats[i] = mFormats[i];
-        }
+    for (size_t i = 0; i < flatenedFormats.size(); i++) {
+        port->formats[i] = flatenedFormats[i];
     }
-    port->num_formats = i;
 
     ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size());
 
+    uint32_t i;
     for (i = 0; i < mGains.size() && i < AUDIO_PORT_MAX_GAINS; i++) {
-        port->gains[i] = mGains[i]->mGain;
+        port->gains[i] = mGains[i]->getGain();
     }
     port->num_gains = i;
 }
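+// Flattening example (hypothetical profiles): two valid profiles
+//     { PCM_16_BIT, rates {44100, 48000}, channels {STEREO} }
+//     { PCM_24_BIT_PACKED, rates {48000}, channels {STEREO} }
+// are exported by toAudioPort() as formats {PCM_16_BIT, PCM_24_BIT_PACKED},
+// sample_rates {44100, 48000} and channel_masks {STEREO}, duplicates removed.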
 
-void AudioPort::importAudioPort(const sp<AudioPort> port) {
-    for (size_t k = 0 ; k < port->mSamplingRates.size() ; k++) {
-        const uint32_t rate = port->mSamplingRates.itemAt(k);
-        if (rate != 0) { // skip "dynamic" rates
-            bool hasRate = false;
-            for (size_t l = 0 ; l < mSamplingRates.size() ; l++) {
-                if (rate == mSamplingRates.itemAt(l)) {
-                    hasRate = true;
+void AudioPort::importAudioPort(const sp<AudioPort> port)
+{
+    size_t indexToImport;
+    for (indexToImport = 0; indexToImport < port->mProfiles.size(); indexToImport++) {
+        const sp<AudioProfile> &profileToImport = port->mProfiles[indexToImport];
+        if (profileToImport->isValid()) {
+            // Import only valid profiles, i.e. with a valid format and non-empty rates and channel masks
+            bool hasSameProfile = false;
+            for (size_t profileIndex = 0; profileIndex < mProfiles.size(); profileIndex++) {
+                if (*mProfiles[profileIndex] == *profileToImport) {
+                    // never import a profile twice
+                    hasSameProfile = true;
                     break;
                 }
             }
-            if (!hasRate) { // never import a sampling rate twice
-                mSamplingRates.add(rate);
+            if (hasSameProfile) { // never import the same profile twice
+                continue;
             }
-        }
-    }
-    for (size_t k = 0 ; k < port->mChannelMasks.size() ; k++) {
-        const audio_channel_mask_t mask = port->mChannelMasks.itemAt(k);
-        if (mask != 0) { // skip "dynamic" masks
-            bool hasMask = false;
-            for (size_t l = 0 ; l < mChannelMasks.size() ; l++) {
-                if (mask == mChannelMasks.itemAt(l)) {
-                    hasMask = true;
-                    break;
-                }
-            }
-            if (!hasMask) { // never import a channel mask twice
-                mChannelMasks.add(mask);
-            }
-        }
-    }
-    for (size_t k = 0 ; k < port->mFormats.size() ; k++) {
-        const audio_format_t format = port->mFormats.itemAt(k);
-        if (format != 0) { // skip "dynamic" formats
-            bool hasFormat = false;
-            for (size_t l = 0 ; l < mFormats.size() ; l++) {
-                if (format == mFormats.itemAt(l)) {
-                    hasFormat = true;
-                    break;
-                }
-            }
-            if (!hasFormat) { // never import a format twice
-                mFormats.add(format);
-            }
+            addAudioProfile(profileToImport);
         }
     }
 }
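+// A minimal usage sketch (hypothetical helper and call site): when a dynamic
+// device reports its capabilities, they can be merged into an existing port:
+//
+//     sp<AudioPort> reported = buildPortFromHalCapabilities();  // hypothetical
+//     devicePort->importAudioPort(reported);  // adds only new, valid profiles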
 
-void AudioPort::clearCapabilities() {
-    mChannelMasks.clear();
-    mFormats.clear();
-    mSamplingRates.clear();
-}
-
-void AudioPort::loadSamplingRates(char *name)
+void AudioPort::pickSamplingRate(uint32_t &pickedRate, const SampleRateVector &samplingRates) const
 {
-    char *str = strtok(name, "|");
-
-    // by convention, "0' in the first entry in mSamplingRates indicates the supported sampling
-    // rates should be read from the output stream after it is opened for the first time
-    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
-        mSamplingRates.add(0);
-        return;
-    }
-
-    while (str != NULL) {
-        uint32_t rate = atoi(str);
-        if (rate != 0) {
-            ALOGV("loadSamplingRates() adding rate %d", rate);
-            mSamplingRates.add(rate);
-        }
-        str = strtok(NULL, "|");
-    }
-}
-
-void AudioPort::loadFormats(char *name)
-{
-    char *str = strtok(name, "|");
-
-    // by convention, "0' in the first entry in mFormats indicates the supported formats
-    // should be read from the output stream after it is opened for the first time
-    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
-        mFormats.add(AUDIO_FORMAT_DEFAULT);
-        return;
-    }
-
-    while (str != NULL) {
-        audio_format_t format = (audio_format_t)ConfigParsingUtils::stringToEnum(sFormatNameToEnumTable,
-                                                             ARRAY_SIZE(sFormatNameToEnumTable),
-                                                             str);
-        if (format != AUDIO_FORMAT_DEFAULT) {
-            mFormats.add(format);
-        }
-        str = strtok(NULL, "|");
-    }
-    // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
-    // TODO: compareFormats could be a lambda to convert between pointer-to-format to format:
-    // [](const audio_format_t *format1, const audio_format_t *format2) {
-    //     return compareFormats(*format1, *format2);
-    // }
-    mFormats.sort(compareFormats);
-}
-
-void AudioPort::loadInChannels(char *name)
-{
-    const char *str = strtok(name, "|");
-
-    ALOGV("loadInChannels() %s", name);
-
-    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
-        mChannelMasks.add(0);
-        return;
-    }
-
-    while (str != NULL) {
-        audio_channel_mask_t channelMask =
-                (audio_channel_mask_t)ConfigParsingUtils::stringToEnum(sInChannelsNameToEnumTable,
-                                                   ARRAY_SIZE(sInChannelsNameToEnumTable),
-                                                   str);
-        if (channelMask == 0) { // if not found, check the channel index table
-            channelMask = (audio_channel_mask_t)
-                      ConfigParsingUtils::stringToEnum(sIndexChannelsNameToEnumTable,
-                              ARRAY_SIZE(sIndexChannelsNameToEnumTable),
-                              str);
-        }
-        if (channelMask != 0) {
-            ALOGV("loadInChannels() adding channelMask %#x", channelMask);
-            mChannelMasks.add(channelMask);
-        }
-        str = strtok(NULL, "|");
-    }
-}
-
-void AudioPort::loadOutChannels(char *name)
-{
-    const char *str = strtok(name, "|");
-
-    ALOGV("loadOutChannels() %s", name);
-
-    // by convention, "0' in the first entry in mChannelMasks indicates the supported channel
-    // masks should be read from the output stream after it is opened for the first time
-    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
-        mChannelMasks.add(0);
-        return;
-    }
-
-    while (str != NULL) {
-        audio_channel_mask_t channelMask =
-                (audio_channel_mask_t)ConfigParsingUtils::stringToEnum(sOutChannelsNameToEnumTable,
-                                                   ARRAY_SIZE(sOutChannelsNameToEnumTable),
-                                                   str);
-        if (channelMask == 0) { // if not found, check the channel index table
-            channelMask = (audio_channel_mask_t)
-                      ConfigParsingUtils::stringToEnum(sIndexChannelsNameToEnumTable,
-                              ARRAY_SIZE(sIndexChannelsNameToEnumTable),
-                              str);
-        }
-        if (channelMask != 0) {
-            mChannelMasks.add(channelMask);
-        }
-        str = strtok(NULL, "|");
-    }
-    return;
-}
-
-audio_gain_mode_t AudioPort::loadGainMode(char *name)
-{
-    const char *str = strtok(name, "|");
-
-    ALOGV("loadGainMode() %s", name);
-    audio_gain_mode_t mode = 0;
-    while (str != NULL) {
-        mode |= (audio_gain_mode_t)ConfigParsingUtils::stringToEnum(sGainModeNameToEnumTable,
-                                                ARRAY_SIZE(sGainModeNameToEnumTable),
-                                                str);
-        str = strtok(NULL, "|");
-    }
-    return mode;
-}
-
-void AudioPort::loadGain(cnode *root, int index)
-{
-    cnode *node = root->first_child;
-
-    sp<AudioGain> gain = new AudioGain(index, mUseInChannelMask);
-
-    while (node) {
-        if (strcmp(node->name, GAIN_MODE) == 0) {
-            gain->mGain.mode = loadGainMode((char *)node->value);
-        } else if (strcmp(node->name, GAIN_CHANNELS) == 0) {
-            if (mUseInChannelMask) {
-                gain->mGain.channel_mask =
-                        (audio_channel_mask_t)ConfigParsingUtils::stringToEnum(sInChannelsNameToEnumTable,
-                                                           ARRAY_SIZE(sInChannelsNameToEnumTable),
-                                                           (char *)node->value);
-            } else {
-                gain->mGain.channel_mask =
-                        (audio_channel_mask_t)ConfigParsingUtils::stringToEnum(sOutChannelsNameToEnumTable,
-                                                           ARRAY_SIZE(sOutChannelsNameToEnumTable),
-                                                           (char *)node->value);
-            }
-        } else if (strcmp(node->name, GAIN_MIN_VALUE) == 0) {
-            gain->mGain.min_value = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_MAX_VALUE) == 0) {
-            gain->mGain.max_value = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_DEFAULT_VALUE) == 0) {
-            gain->mGain.default_value = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_STEP_VALUE) == 0) {
-            gain->mGain.step_value = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_MIN_RAMP_MS) == 0) {
-            gain->mGain.min_ramp_ms = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_MAX_RAMP_MS) == 0) {
-            gain->mGain.max_ramp_ms = atoi((char *)node->value);
-        }
-        node = node->next;
-    }
-
-    ALOGV("loadGain() adding new gain mode %08x channel mask %08x min mB %d max mB %d",
-          gain->mGain.mode, gain->mGain.channel_mask, gain->mGain.min_value, gain->mGain.max_value);
-
-    if (gain->mGain.mode == 0) {
-        return;
-    }
-    mGains.add(gain);
-}
-
-void AudioPort::loadGains(cnode *root)
-{
-    cnode *node = root->first_child;
-    int index = 0;
-    while (node) {
-        ALOGV("loadGains() loading gain %s", node->name);
-        loadGain(node, index++);
-        node = node->next;
-    }
-}
-
-status_t AudioPort::checkExactSamplingRate(uint32_t samplingRate) const
-{
-    if (mSamplingRates.isEmpty()) {
-        return NO_ERROR;
-    }
-
-    for (size_t i = 0; i < mSamplingRates.size(); i ++) {
-        if (mSamplingRates[i] == samplingRate) {
-            return NO_ERROR;
-        }
-    }
-    return BAD_VALUE;
-}
-
-status_t AudioPort::checkCompatibleSamplingRate(uint32_t samplingRate,
-        uint32_t *updatedSamplingRate) const
-{
-    if (mSamplingRates.isEmpty()) {
-        if (updatedSamplingRate != NULL) {
-            *updatedSamplingRate = samplingRate;
-        }
-        return NO_ERROR;
-    }
-
-    // Search for the closest supported sampling rate that is above (preferred)
-    // or below (acceptable) the desired sampling rate, within a permitted ratio.
-    // The sampling rates do not need to be sorted in ascending order.
-    ssize_t maxBelow = -1;
-    ssize_t minAbove = -1;
-    uint32_t candidate;
-    for (size_t i = 0; i < mSamplingRates.size(); i++) {
-        candidate = mSamplingRates[i];
-        if (candidate == samplingRate) {
-            if (updatedSamplingRate != NULL) {
-                *updatedSamplingRate = candidate;
-            }
-            return NO_ERROR;
-        }
-        // candidate < desired
-        if (candidate < samplingRate) {
-            if (maxBelow < 0 || candidate > mSamplingRates[maxBelow]) {
-                maxBelow = i;
-            }
-        // candidate > desired
-        } else {
-            if (minAbove < 0 || candidate < mSamplingRates[minAbove]) {
-                minAbove = i;
-            }
-        }
-    }
-
-    // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
-    if (minAbove >= 0) {
-        candidate = mSamplingRates[minAbove];
-        if (candidate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) {
-            if (updatedSamplingRate != NULL) {
-                *updatedSamplingRate = candidate;
-            }
-            return NO_ERROR;
-        }
-    }
-    // But if we have to up-sample from a lower sampling rate, that's OK.
-    if (maxBelow >= 0) {
-        candidate = mSamplingRates[maxBelow];
-        if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) {
-            if (updatedSamplingRate != NULL) {
-                *updatedSamplingRate = candidate;
-            }
-            return NO_ERROR;
-        }
-    }
-    // leave updatedSamplingRate unmodified
-    return BAD_VALUE;
-}
-
-status_t AudioPort::checkExactChannelMask(audio_channel_mask_t channelMask) const
-{
-    if (mChannelMasks.isEmpty()) {
-        return NO_ERROR;
-    }
-
-    for (size_t i = 0; i < mChannelMasks.size(); i++) {
-        if (mChannelMasks[i] == channelMask) {
-            return NO_ERROR;
-        }
-    }
-    return BAD_VALUE;
-}
-
-status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask,
-        audio_channel_mask_t *updatedChannelMask) const
-{
-    if (mChannelMasks.isEmpty()) {
-        if (updatedChannelMask != NULL) {
-            *updatedChannelMask = channelMask;
-        }
-        return NO_ERROR;
-    }
-
-    const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
-    const bool isIndex = audio_channel_mask_get_representation(channelMask)
-            == AUDIO_CHANNEL_REPRESENTATION_INDEX;
-    int bestMatch = 0;
-    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
-        audio_channel_mask_t supported = mChannelMasks[i];
-        if (supported == channelMask) {
-            // Exact matches always taken.
-            if (updatedChannelMask != NULL) {
-                *updatedChannelMask = channelMask;
-            }
-            return NO_ERROR;
-        }
-
-        // AUDIO_CHANNEL_NONE (value: 0) is used for dynamic channel support
-        if (isRecordThread && supported != AUDIO_CHANNEL_NONE) {
-            // Approximate (best) match:
-            // The match score measures how well the supported channel mask matches the
-            // desired mask, where increasing-is-better.
-            //
-            // TODO: Some tweaks may be needed.
-            // Should be a static function of the data processing library.
-            //
-            // In priority:
-            // match score = 1000 if legacy channel conversion equivalent (always prefer this)
-            // OR
-            // match score += 100 if the channel mask representations match
-            // match score += number of channels matched.
-            //
-            // If there are no matched channels, the mask may still be accepted
-            // but the playback or record will be silent.
-            const bool isSupportedIndex = (audio_channel_mask_get_representation(supported)
-                    == AUDIO_CHANNEL_REPRESENTATION_INDEX);
-            int match;
-            if (isIndex && isSupportedIndex) {
-                // index equivalence
-                match = 100 + __builtin_popcount(
-                        audio_channel_mask_get_bits(channelMask)
-                            & audio_channel_mask_get_bits(supported));
-            } else if (isIndex && !isSupportedIndex) {
-                const uint32_t equivalentBits =
-                        (1 << audio_channel_count_from_in_mask(supported)) - 1 ;
-                match = __builtin_popcount(
-                        audio_channel_mask_get_bits(channelMask) & equivalentBits);
-            } else if (!isIndex && isSupportedIndex) {
-                const uint32_t equivalentBits =
-                        (1 << audio_channel_count_from_in_mask(channelMask)) - 1;
-                match = __builtin_popcount(
-                        equivalentBits & audio_channel_mask_get_bits(supported));
-            } else {
-                // positional equivalence
-                match = 100 + __builtin_popcount(
-                        audio_channel_mask_get_bits(channelMask)
-                            & audio_channel_mask_get_bits(supported));
-                switch (supported) {
-                case AUDIO_CHANNEL_IN_FRONT_BACK:
-                case AUDIO_CHANNEL_IN_STEREO:
-                    if (channelMask == AUDIO_CHANNEL_IN_MONO) {
-                        match = 1000;
-                    }
-                    break;
-                case AUDIO_CHANNEL_IN_MONO:
-                    if (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
-                            || channelMask == AUDIO_CHANNEL_IN_STEREO) {
-                        match = 1000;
-                    }
-                    break;
-                default:
-                    break;
-                }
-            }
-            if (match > bestMatch) {
-                bestMatch = match;
-                if (updatedChannelMask != NULL) {
-                    *updatedChannelMask = supported;
-                } else {
-                    return NO_ERROR; // any match will do in this case.
-                }
-            }
-        }
-    }
-    return bestMatch > 0 ? NO_ERROR : BAD_VALUE;
-}
-
-status_t AudioPort::checkExactFormat(audio_format_t format) const
-{
-    if (mFormats.isEmpty()) {
-        return NO_ERROR;
-    }
-
-    for (size_t i = 0; i < mFormats.size(); i ++) {
-        if (mFormats[i] == format) {
-            return NO_ERROR;
-        }
-    }
-    return BAD_VALUE;
-}
-
-status_t AudioPort::checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat)
-        const
-{
-    if (mFormats.isEmpty()) {
-        if (updatedFormat != NULL) {
-            *updatedFormat = format;
-        }
-        return NO_ERROR;
-    }
-
-    const bool checkInexact = // when port is input and format is linear pcm
-            mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK
-            && audio_is_linear_pcm(format);
-
-    // iterate from best format to worst format (reverse order)
-    for (ssize_t i = mFormats.size() - 1; i >= 0 ; --i) {
-        if (mFormats[i] == format ||
-                (checkInexact
-                        && mFormats[i] != AUDIO_FORMAT_DEFAULT
-                        && audio_is_linear_pcm(mFormats[i]))) {
-            // for inexact checks we take the first linear pcm format due to sorting.
-            if (updatedFormat != NULL) {
-                *updatedFormat = mFormats[i];
-            }
-            return NO_ERROR;
-        }
-    }
-    return BAD_VALUE;
-}
-
-uint32_t AudioPort::pickSamplingRate() const
-{
-    // special case for uninitialized dynamic profile
-    if (mSamplingRates.size() == 1 && mSamplingRates[0] == 0) {
-        return 0;
-    }
-
+    pickedRate = 0;
     // For direct outputs, pick minimum sampling rate: this helps ensure that the
     // channel count / sampling rate combination chosen will be supported by the connected
     // sink
-    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
-            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+    if (isDirectOutput()) {
         uint32_t samplingRate = UINT_MAX;
-        for (size_t i = 0; i < mSamplingRates.size(); i ++) {
-            if ((mSamplingRates[i] < samplingRate) && (mSamplingRates[i] > 0)) {
-                samplingRate = mSamplingRates[i];
+        for (size_t i = 0; i < samplingRates.size(); i ++) {
+            if ((samplingRates[i] < samplingRate) && (samplingRates[i] > 0)) {
+                samplingRate = samplingRates[i];
             }
         }
-        return (samplingRate == UINT_MAX) ? 0 : samplingRate;
-    }
+        pickedRate = (samplingRate == UINT_MAX) ? 0 : samplingRate;
+    } else {
+        uint32_t maxRate = SAMPLE_RATE_HZ_MAX;
 
-    uint32_t samplingRate = 0;
-    uint32_t maxRate = MAX_MIXER_SAMPLING_RATE;
-
-    // For mixed output and inputs, use max mixer sampling rates. Do not
-    // limit sampling rate otherwise
-    // For inputs, also see checkCompatibleSamplingRate().
-    if (mType != AUDIO_PORT_TYPE_MIX) {
-        maxRate = UINT_MAX;
-    }
-    // TODO: should mSamplingRates[] be ordered in terms of our preference
-    // and we return the first (and hence most preferred) match?  This is of concern if
-    // we want to choose 96kHz over 192kHz for USB driver stability or resource constraints.
-    for (size_t i = 0; i < mSamplingRates.size(); i ++) {
-        if ((mSamplingRates[i] > samplingRate) && (mSamplingRates[i] <= maxRate)) {
-            samplingRate = mSamplingRates[i];
+        // For mixed output and inputs, use max mixer sampling rates. Do not
+        // limit sampling rate otherwise
+        // For inputs, also see checkCompatibleSamplingRate().
+        if (mType != AUDIO_PORT_TYPE_MIX) {
+            maxRate = UINT_MAX;
+        }
+        // TODO: should mSamplingRates[] be ordered in terms of our preference
+        // and we return the first (and hence most preferred) match?  This is of concern if
+        // we want to choose 96kHz over 192kHz for USB driver stability or resource constraints.
+        for (size_t i = 0; i < samplingRates.size(); i ++) {
+            if ((samplingRates[i] > pickedRate) && (samplingRates[i] <= maxRate)) {
+                pickedRate = samplingRates[i];
+            }
         }
     }
-    return samplingRate;
 }
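+// Example (hypothetical rates, assuming SAMPLE_RATE_HZ_MAX == 192000): for a mixed
+// output offering {44100, 48000, 192000}, pickSamplingRate() picks 192000, the
+// highest rate within the mixer cap; a direct output offering the same rates picks
+// 44100, the minimum, to maximize the chance the attached sink supports the
+// rate/channel combination.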
 
-audio_channel_mask_t AudioPort::pickChannelMask() const
+void AudioPort::pickChannelMask(audio_channel_mask_t &pickedChannelMask,
+                                const ChannelsVector &channelMasks) const
 {
-    // special case for uninitialized dynamic profile
-    if (mChannelMasks.size() == 1 && mChannelMasks[0] == 0) {
-        return AUDIO_CHANNEL_NONE;
-    }
-    audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE;
-
+    pickedChannelMask = AUDIO_CHANNEL_NONE;
     // For direct outputs, pick minimum channel count: this helps ensure that the
     // channel count / sampling rate combination chosen will be supported by the connected
     // sink
-    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
-            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+    if (isDirectOutput()) {
         uint32_t channelCount = UINT_MAX;
-        for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+        for (size_t i = 0; i < channelMasks.size(); i ++) {
             uint32_t cnlCount;
-            if (mUseInChannelMask) {
-                cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
+            if (useInputChannelMask()) {
+                cnlCount = audio_channel_count_from_in_mask(channelMasks[i]);
             } else {
-                cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
+                cnlCount = audio_channel_count_from_out_mask(channelMasks[i]);
             }
             if ((cnlCount < channelCount) && (cnlCount > 0)) {
-                channelMask = mChannelMasks[i];
+                pickedChannelMask = channelMasks[i];
                 channelCount = cnlCount;
             }
         }
-        return channelMask;
-    }
+    } else {
+        uint32_t channelCount = 0;
+        uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
 
-    uint32_t channelCount = 0;
-    uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
-
-    // For mixed output and inputs, use max mixer channel count. Do not
-    // limit channel count otherwise
-    if (mType != AUDIO_PORT_TYPE_MIX) {
-        maxCount = UINT_MAX;
-    }
-    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
-        uint32_t cnlCount;
-        if (mUseInChannelMask) {
-            cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
-        } else {
-            cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
+        // For mixed output and inputs, use max mixer channel count. Do not
+        // limit channel count otherwise
+        if (mType != AUDIO_PORT_TYPE_MIX) {
+            maxCount = UINT_MAX;
         }
-        if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
-            channelMask = mChannelMasks[i];
-            channelCount = cnlCount;
+        for (size_t i = 0; i < channelMasks.size(); i ++) {
+            uint32_t cnlCount;
+            if (useInputChannelMask()) {
+                cnlCount = audio_channel_count_from_in_mask(channelMasks[i]);
+            } else {
+                cnlCount = audio_channel_count_from_out_mask(channelMasks[i]);
+            }
+            if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
+                pickedChannelMask = channelMasks[i];
+                channelCount = cnlCount;
+            }
         }
     }
-    return channelMask;
 }
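+// Example: for a mixed output offering {MONO, STEREO, 5POINT1}, pickChannelMask()
+// picks 5POINT1 (highest channel count within MAX_MIXER_CHANNEL_COUNT); a direct
+// output offering the same masks picks MONO, mirroring pickSamplingRate() above.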
 
 /* format in order of increasing preference */
@@ -672,8 +239,7 @@
         AUDIO_FORMAT_PCM_FLOAT,
 };
 
-int AudioPort::compareFormats(audio_format_t format1,
-                                                  audio_format_t format2)
+int AudioPort::compareFormats(audio_format_t format1, audio_format_t format2)
 {
     // NOTE: AUDIO_FORMAT_INVALID is also considered not PCM and will be compared equal to any
     // compressed format and better than any PCM format. This is by design of pickFormat()
@@ -703,36 +269,77 @@
     return index1 - index2;
 }
 
-audio_format_t AudioPort::pickFormat() const
+bool AudioPort::isBetterFormatMatch(audio_format_t newFormat,
+                                    audio_format_t currentFormat,
+                                    audio_format_t targetFormat)
 {
-    // special case for uninitialized dynamic profile
-    if (mFormats.size() == 1 && mFormats[0] == 0) {
-        return AUDIO_FORMAT_DEFAULT;
+    if (newFormat == currentFormat) {
+        return false;
     }
+    if (currentFormat == AUDIO_FORMAT_INVALID) {
+        return true;
+    }
+    if (newFormat == targetFormat) {
+        return true;
+    }
+    int currentDiffBytes = (int)audio_bytes_per_sample(targetFormat) -
+            audio_bytes_per_sample(currentFormat);
+    int newDiffBytes = (int)audio_bytes_per_sample(targetFormat) -
+            audio_bytes_per_sample(newFormat);
 
-    audio_format_t format = AUDIO_FORMAT_DEFAULT;
-    audio_format_t bestFormat =
-            AudioPort::sPcmFormatCompareTable[
-                ARRAY_SIZE(AudioPort::sPcmFormatCompareTable) - 1];
-    // For mixed output and inputs, use best mixer output format. Do not
-    // limit format otherwise
-    if ((mType != AUDIO_PORT_TYPE_MIX) ||
-            ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
-             (((mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)) != 0)))) {
+    if (abs(newDiffBytes) < abs(currentDiffBytes)) {
+        return true;
+    } else if (abs(newDiffBytes) == abs(currentDiffBytes)) {
+        return (newDiffBytes >= 0);
+    }
+    return false;
+}
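+// Worked example (byte sizes from audio_bytes_per_sample()): with
+// targetFormat = AUDIO_FORMAT_PCM_24_BIT_PACKED (3 bytes),
+// currentFormat = AUDIO_FORMAT_PCM_16_BIT (2 bytes, diff +1) and
+// newFormat = AUDIO_FORMAT_PCM_32_BIT (4 bytes, diff -1), the absolute distances
+// tie and the tie-break (newDiffBytes >= 0) is false, so the 16-bit format is
+// kept: on equal distance, the candidate not exceeding the target wins.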
+
+void AudioPort::pickAudioProfile(uint32_t &samplingRate,
+                                 audio_channel_mask_t &channelMask,
+                                 audio_format_t &format) const
+{
+    format = AUDIO_FORMAT_DEFAULT;
+    samplingRate = 0;
+    channelMask = AUDIO_CHANNEL_NONE;
+
+    // special case for uninitialized dynamic profile
+    if (!mProfiles.hasValidProfile()) {
+        return;
+    }
+    audio_format_t bestFormat = sPcmFormatCompareTable[ARRAY_SIZE(sPcmFormatCompareTable) - 1];
+    // For mixed output and inputs, use best mixer output format.
+    // Do not limit format otherwise
+    if ((mType != AUDIO_PORT_TYPE_MIX) || isDirectOutput()) {
         bestFormat = AUDIO_FORMAT_INVALID;
     }
 
-    for (size_t i = 0; i < mFormats.size(); i ++) {
-        if ((compareFormats(mFormats[i], format) > 0) &&
-                (compareFormats(mFormats[i], bestFormat) <= 0)) {
-            format = mFormats[i];
+    for (size_t i = 0; i < mProfiles.size(); i ++) {
+        if (!mProfiles[i]->isValid()) {
+            continue;
+        }
+        audio_format_t formatToCompare = mProfiles[i]->getFormat();
+        if ((compareFormats(formatToCompare, format) > 0) &&
+                (compareFormats(formatToCompare, bestFormat) <= 0)) {
+            uint32_t pickedSamplingRate = 0;
+            audio_channel_mask_t pickedChannelMask = AUDIO_CHANNEL_NONE;
+            pickChannelMask(pickedChannelMask, mProfiles[i]->getChannels());
+            pickSamplingRate(pickedSamplingRate, mProfiles[i]->getSampleRates());
+
+            if (formatToCompare != AUDIO_FORMAT_DEFAULT && pickedChannelMask != AUDIO_CHANNEL_NONE
+                    && pickedSamplingRate != 0) {
+                format = formatToCompare;
+                channelMask = pickedChannelMask;
+                samplingRate = pickedSamplingRate;
+                // TODO: shall we return on the first match or keep trying to pick a better profile?
+            }
         }
     }
-    return format;
+    ALOGV("%s Port[nm:%s] profile rate=%d, format=%d, channels=%d", __FUNCTION__, mName.string(),
+          samplingRate, channelMask, format);
 }
 
-status_t AudioPort::checkGain(const struct audio_gain_config *gainConfig,
-                                                  int index) const
+status_t AudioPort::checkGain(const struct audio_gain_config *gainConfig, int index) const
 {
     if (index < 0 || (size_t)index >= mGains.size()) {
         return BAD_VALUE;
@@ -740,77 +347,27 @@
     return mGains[index]->checkConfig(gainConfig);
 }
 
-void AudioPort::dump(int fd, int spaces) const
+void AudioPort::dump(int fd, int spaces, bool verbose) const
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
     String8 result;
 
-    if (mName.length() != 0) {
+    if (!mName.isEmpty()) {
         snprintf(buffer, SIZE, "%*s- name: %s\n", spaces, "", mName.string());
         result.append(buffer);
+        write(fd, result.string(), result.size());
     }
+    if (verbose) {
+        mProfiles.dump(fd, spaces);
 
-    if (mSamplingRates.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- sampling rates: ", spaces, "");
-        result.append(buffer);
-        for (size_t i = 0; i < mSamplingRates.size(); i++) {
-            if (i == 0 && mSamplingRates[i] == 0) {
-                snprintf(buffer, SIZE, "Dynamic");
-            } else {
-                snprintf(buffer, SIZE, "%d", mSamplingRates[i]);
+        if (mGains.size() != 0) {
+            snprintf(buffer, SIZE, "%*s- gains:\n", spaces, "");
+            result = buffer;
+            write(fd, result.string(), result.size());
+            for (size_t i = 0; i < mGains.size(); i++) {
+                mGains[i]->dump(fd, spaces + 2, i);
             }
-            result.append(buffer);
-            result.append(i == (mSamplingRates.size() - 1) ? "" : ", ");
-        }
-        result.append("\n");
-    }
-
-    if (mChannelMasks.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- channel masks: ", spaces, "");
-        result.append(buffer);
-        for (size_t i = 0; i < mChannelMasks.size(); i++) {
-            ALOGV("AudioPort::dump mChannelMasks %zu %08x", i, mChannelMasks[i]);
-
-            if (i == 0 && mChannelMasks[i] == 0) {
-                snprintf(buffer, SIZE, "Dynamic");
-            } else {
-                snprintf(buffer, SIZE, "0x%04x", mChannelMasks[i]);
-            }
-            result.append(buffer);
-            result.append(i == (mChannelMasks.size() - 1) ? "" : ", ");
-        }
-        result.append("\n");
-    }
-
-    if (mFormats.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- formats: ", spaces, "");
-        result.append(buffer);
-        for (size_t i = 0; i < mFormats.size(); i++) {
-            const char *formatStr = ConfigParsingUtils::enumToString(sFormatNameToEnumTable,
-                                                 ARRAY_SIZE(sFormatNameToEnumTable),
-                                                 mFormats[i]);
-            const bool isEmptyStr = formatStr[0] == 0;
-            if (i == 0 && isEmptyStr) {
-                snprintf(buffer, SIZE, "Dynamic");
-            } else {
-                if (isEmptyStr) {
-                    snprintf(buffer, SIZE, "%#x", mFormats[i]);
-                } else {
-                    snprintf(buffer, SIZE, "%s", formatStr);
-                }
-            }
-            result.append(buffer);
-            result.append(i == (mFormats.size() - 1) ? "" : ", ");
-        }
-        result.append("\n");
-    }
-    write(fd, result.string(), result.size());
-    if (mGains.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- gains:\n", spaces, "");
-        write(fd, buffer, strlen(buffer) + 1);
-        for (size_t i = 0; i < mGains.size(); i++) {
-            mGains[i]->dump(fd, spaces + 2, i);
         }
     }
 }
@@ -830,9 +387,8 @@
     mGain.index = -1;
 }
 
-status_t AudioPortConfig::applyAudioPortConfig(
-                                                        const struct audio_port_config *config,
-                                                        struct audio_port_config *backupConfig)
+status_t AudioPortConfig::applyAudioPortConfig(const struct audio_port_config *config,
+                                               struct audio_port_config *backupConfig)
 {
     struct audio_port_config localBackupConfig;
     status_t status = NO_ERROR;
@@ -845,25 +401,19 @@
         status = NO_INIT;
         goto exit;
     }
+    status = audioport->checkExactAudioProfile(config->sample_rate,
+                                               config->channel_mask,
+                                               config->format);
+    if (status != NO_ERROR) {
+        goto exit;
+    }
     if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        status = audioport->checkExactSamplingRate(config->sample_rate);
-        if (status != NO_ERROR) {
-            goto exit;
-        }
         mSamplingRate = config->sample_rate;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        status = audioport->checkExactChannelMask(config->channel_mask);
-        if (status != NO_ERROR) {
-            goto exit;
-        }
         mChannelMask = config->channel_mask;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        status = audioport->checkExactFormat(config->format);
-        if (status != NO_ERROR) {
-            goto exit;
-        }
         mFormat = config->format;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
@@ -911,9 +461,11 @@
     } else {
         dstConfig->format = AUDIO_FORMAT_INVALID;
     }
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) {
+    sp<AudioPort> audioport = getAudioPort();
+    if ((dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) && audioport != NULL) {
         dstConfig->gain = mGain;
-        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN)) {
+        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN)
+                && audioport->checkGain(&srcConfig->gain, srcConfig->gain.index) == OK) {
             dstConfig->gain = srcConfig->gain;
         }
     } else {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
new file mode 100644
index 0000000..98f7a94
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -0,0 +1,291 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioProfile"
+//#define LOG_NDEBUG 0
+
+#include "AudioProfile.h"
+#include "AudioPort.h"
+#include "HwModule.h"
+#include "AudioGain.h"
+#include <utils/SortedVector.h>
+#include "TypeConverter.h"
+#include <media/AudioResamplerPublic.h>
+#include <algorithm>
+
+namespace android {
+
+status_t AudioProfile::checkExact(uint32_t samplingRate, audio_channel_mask_t channelMask,
+                                  audio_format_t format) const
+{
+    if (audio_formats_match(format, mFormat) &&
+            supportsChannels(channelMask) &&
+            supportsRate(samplingRate)) {
+        return NO_ERROR;
+    }
+    return BAD_VALUE;
+}
+
+template <typename T>
+bool operator == (const SortedVector<T> &left, const SortedVector<T> &right)
+{
+    if (left.size() != right.size()) {
+        return false;
+    }
+    for (size_t index = 0; index < right.size(); index++) {
+        if (left[index] != right[index]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+bool operator == (const AudioProfile &left, const AudioProfile &right)
+{
+    return (left.getFormat() == right.getFormat()) &&
+            (left.getChannels() == right.getChannels()) &&
+            (left.getSampleRates() == right.getSampleRates());
+}
+
+status_t AudioProfile::checkCompatibleSamplingRate(uint32_t samplingRate,
+                                                   uint32_t &updatedSamplingRate) const
+{
+    ALOG_ASSERT(samplingRate > 0);
+
+    if (mSamplingRates.isEmpty()) {
+        updatedSamplingRate = samplingRate;
+        return NO_ERROR;
+    }
+
+    // Search for the closest supported sampling rate that is above (preferred)
+    // or below (acceptable) the desired sampling rate, within a permitted ratio.
+    // The sampling rates are sorted in ascending order.
+    size_t orderOfDesiredRate = mSamplingRates.orderOf(samplingRate);
+
+    // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
+    if (orderOfDesiredRate < mSamplingRates.size()) {
+        uint32_t candidate = mSamplingRates[orderOfDesiredRate];
+        if (candidate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) {
+            updatedSamplingRate = candidate;
+            return NO_ERROR;
+        }
+    }
+    // But if we have to up-sample from a lower sampling rate, that's OK.
+    if (orderOfDesiredRate != 0) {
+        uint32_t candidate = mSamplingRates[orderOfDesiredRate - 1];
+        if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) {
+            updatedSamplingRate = candidate;
+            return NO_ERROR;
+        }
+    }
+    // leave updatedSamplingRate unmodified
+    return BAD_VALUE;
+}
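+// Example (hypothetical profile, assuming AUDIO_RESAMPLER_DOWN_RATIO_MAX == 6):
+// with supported rates {8000, 16000, 48000} and a request for 44100 Hz, orderOf()
+// returns 2 (the insertion point), so 48000 is tried first; since 48000 / 6 =
+// 8000 <= 44100, 48000 is returned as updatedSamplingRate and the client stream
+// is resampled from it.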
+
+status_t AudioProfile::checkCompatibleChannelMask(audio_channel_mask_t channelMask,
+                                                  audio_channel_mask_t &updatedChannelMask,
+                                                  audio_port_type_t portType,
+                                                  audio_port_role_t portRole) const
+{
+    if (mChannelMasks.isEmpty()) {
+        updatedChannelMask = channelMask;
+        return NO_ERROR;
+    }
+    const bool isRecordThread = portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK;
+    const bool isIndex = audio_channel_mask_get_representation(channelMask)
+            == AUDIO_CHANNEL_REPRESENTATION_INDEX;
+    int bestMatch = 0;
+    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+        audio_channel_mask_t supported = mChannelMasks[i];
+        if (supported == channelMask) {
+            // Exact matches always taken.
+            updatedChannelMask = channelMask;
+            return NO_ERROR;
+        }
+
+        // AUDIO_CHANNEL_NONE (value: 0) is used for dynamic channel support
+        if (isRecordThread && supported != AUDIO_CHANNEL_NONE) {
+            // Approximate (best) match:
+            // The match score measures how well the supported channel mask matches the
+            // desired mask, where increasing-is-better.
+            //
+            // TODO: Some tweaks may be needed.
+            // Should be a static function of the data processing library.
+            //
+            // In priority:
+            // match score = 1000 if legacy channel conversion equivalent (always prefer this)
+            // OR
+            // match score += 100 if the channel mask representations match
+            // match score += number of channels matched.
+            //
+            // If there are no matched channels, the mask may still be accepted
+            // but the playback or record will be silent.
+            const bool isSupportedIndex = (audio_channel_mask_get_representation(supported)
+                    == AUDIO_CHANNEL_REPRESENTATION_INDEX);
+            int match;
+            if (isIndex && isSupportedIndex) {
+                // index equivalence
+                match = 100 + __builtin_popcount(
+                        audio_channel_mask_get_bits(channelMask)
+                            & audio_channel_mask_get_bits(supported));
+            } else if (isIndex && !isSupportedIndex) {
+                const uint32_t equivalentBits =
+                        (1 << audio_channel_count_from_in_mask(supported)) - 1 ;
+                match = __builtin_popcount(
+                        audio_channel_mask_get_bits(channelMask) & equivalentBits);
+            } else if (!isIndex && isSupportedIndex) {
+                const uint32_t equivalentBits =
+                        (1 << audio_channel_count_from_in_mask(channelMask)) - 1;
+                match = __builtin_popcount(
+                        equivalentBits & audio_channel_mask_get_bits(supported));
+            } else {
+                // positional equivalence
+                match = 100 + __builtin_popcount(
+                        audio_channel_mask_get_bits(channelMask)
+                            & audio_channel_mask_get_bits(supported));
+                switch (supported) {
+                case AUDIO_CHANNEL_IN_FRONT_BACK:
+                case AUDIO_CHANNEL_IN_STEREO:
+                    if (channelMask == AUDIO_CHANNEL_IN_MONO) {
+                        match = 1000;
+                    }
+                    break;
+                case AUDIO_CHANNEL_IN_MONO:
+                    if (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
+                            || channelMask == AUDIO_CHANNEL_IN_STEREO) {
+                        match = 1000;
+                    }
+                    break;
+                default:
+                    break;
+                }
+            }
+            if (match > bestMatch) {
+                bestMatch = match;
+                updatedChannelMask = supported;
+            }
+        }
+    }
+    return bestMatch > 0 ? NO_ERROR : BAD_VALUE;
+}
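+// Example: a record client requesting AUDIO_CHANNEL_IN_MONO against a profile that
+// only supports AUDIO_CHANNEL_IN_STEREO scores 1000 (legacy stereo-to-mono channel
+// conversion), so IN_STEREO is returned as updatedChannelMask instead of failing.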
+
+void AudioProfile::dump(int fd, int spaces) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%s%s%s\n", mIsDynamicFormat ? "[dynamic format]" : "",
+             mIsDynamicChannels ? "[dynamic channels]" : "",
+             mIsDynamicRate ? "[dynamic rates]" : "");
+    result.append(buffer);
+    if (mName.length() != 0) {
+        snprintf(buffer, SIZE, "%*s- name: %s\n", spaces, "", mName.string());
+        result.append(buffer);
+    }
+    std::string formatLiteral;
+    if (FormatConverter::toString(mFormat, formatLiteral)) {
+        snprintf(buffer, SIZE, "%*s- format: %s\n", spaces, "", formatLiteral.c_str());
+        result.append(buffer);
+    }
+    if (!mSamplingRates.isEmpty()) {
+        snprintf(buffer, SIZE, "%*s- sampling rates:", spaces, "");
+        result.append(buffer);
+        for (size_t i = 0; i < mSamplingRates.size(); i++) {
+            snprintf(buffer, SIZE, "%d", mSamplingRates[i]);
+            result.append(buffer);
+            result.append(i == (mSamplingRates.size() - 1) ? "" : ", ");
+        }
+        result.append("\n");
+    }
+
+    if (!mChannelMasks.isEmpty()) {
+        snprintf(buffer, SIZE, "%*s- channel masks:", spaces, "");
+        result.append(buffer);
+        for (size_t i = 0; i < mChannelMasks.size(); i++) {
+            snprintf(buffer, SIZE, "0x%04x", mChannelMasks[i]);
+            result.append(buffer);
+            result.append(i == (mChannelMasks.size() - 1) ? "" : ", ");
+        }
+        result.append("\n");
+    }
+    write(fd, result.string(), result.size());
+}
+
+status_t AudioProfileVector::checkExactProfile(uint32_t samplingRate,
+                                               audio_channel_mask_t channelMask,
+                                               audio_format_t format) const
+{
+    if (isEmpty()) {
+        return NO_ERROR;
+    }
+
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioProfile> profile = itemAt(i);
+        if (profile->checkExact(samplingRate, channelMask, format) == NO_ERROR) {
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
+
+status_t AudioProfileVector::checkCompatibleProfile(uint32_t &samplingRate,
+                                                    audio_channel_mask_t &channelMask,
+                                                    audio_format_t &format,
+                                                    audio_port_type_t portType,
+                                                    audio_port_role_t portRole) const
+{
+    if (isEmpty()) {
+        return NO_ERROR;
+    }
+
+    const bool checkInexact = // when port is input and format is linear pcm
+            portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK
+            && audio_is_linear_pcm(format);
+
+    // iterate from best format to worst format (reverse order)
+    for (ssize_t i = size() - 1; i >= 0 ; --i) {
+        const sp<AudioProfile> profile = itemAt(i);
+        audio_format_t formatToCompare = profile->getFormat();
+        if (formatToCompare == format ||
+                (checkInexact
+                        && formatToCompare != AUDIO_FORMAT_DEFAULT
+                        && audio_is_linear_pcm(formatToCompare))) {
+            // A compatible format has been found; check whether this profile also
+            // supports a compatible rate and channel mask
+            audio_channel_mask_t updatedChannels;
+            uint32_t updatedRate;
+            if (profile->checkCompatibleChannelMask(channelMask, updatedChannels,
+                                                    portType, portRole) == NO_ERROR &&
+                    profile->checkCompatibleSamplingRate(samplingRate, updatedRate) == NO_ERROR) {
+                // for inexact checks we take the first linear pcm format due to sorting.
+                format = formatToCompare;
+                channelMask = updatedChannels;
+                samplingRate = updatedRate;
+                return NO_ERROR;
+            }
+        }
+    }
+    return BAD_VALUE;
+}
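+// Example flow (input mix port, linear PCM request): a request for
+// { 44100 Hz, AUDIO_CHANNEL_IN_MONO, AUDIO_FORMAT_PCM_16_BIT } walks the profiles
+// from best to worst format, accepts the first linear PCM profile whose rate and
+// channel checks succeed, and updates all three in/out parameters accordingly.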
+
+int AudioProfileVector::compareFormats(const sp<AudioProfile> *profile1,
+                                       const sp<AudioProfile> *profile2)
+{
+    return AudioPort::compareFormats((*profile1)->getFormat(), (*profile2)->getFormat());
+}
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
new file mode 100644
index 0000000..79ad1f7
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioRoute"
+//#define LOG_NDEBUG 0
+
+#include "AudioRoute.h"
+#include "HwModule.h"
+#include "AudioGain.h"
+
+namespace android
+{
+
+void AudioRoute::dump(int fd, int spaces) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%*s- Type: %s\n", spaces, "", mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix");
+    result.append(buffer);
+
+    snprintf(buffer, SIZE, "%*s- Sink: %s\n", spaces, "", mSink->getTagName().string());
+    result.append(buffer);
+
+    if (mSources.size() != 0) {
+        snprintf(buffer, SIZE, "%*s- Sources: \n", spaces, "");
+        result.append(buffer);
+        for (size_t i = 0; i < mSources.size(); i++) {
+            snprintf(buffer, SIZE, "%*s%s \n", spaces + 4, "", mSources[i]->getTagName().string());
+            result.append(buffer);
+        }
+    }
+    result.append("\n");
+
+    write(fd, result.string(), result.size());
+}
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
new file mode 100644
index 0000000..da983c5
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioSession"
+//#define LOG_NDEBUG 0
+
+#include <AudioPolicyInterface.h>
+#include "AudioSession.h"
+#include "AudioGain.h"
+#include "TypeConverter.h"
+#include <cutils/log.h>
+#include <utils/String8.h>
+
+namespace android {
+
+AudioSession::AudioSession(audio_session_t session,
+                           audio_source_t inputSource,
+                           audio_format_t format,
+                           uint32_t sampleRate,
+                           audio_channel_mask_t channelMask,
+                           audio_input_flags_t flags,
+                           uid_t uid,
+                           bool isSoundTrigger,
+                           AudioMix* policyMix,
+                           AudioPolicyClientInterface *clientInterface) :
+    mSession(session), mInputSource(inputSource),
+    mConfig({ .format = format, .sample_rate = sampleRate, .channel_mask = channelMask}),
+    mFlags(flags), mUid(uid), mIsSoundTrigger(isSoundTrigger),
+    mOpenCount(1), mActiveCount(0), mPolicyMix(policyMix), mClientInterface(clientInterface),
+    mInfoProvider(NULL)
+{
+}
+
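+// Adjusts the open count by delta; an underflow is logged and the count clamps to zero.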
+uint32_t AudioSession::changeOpenCount(int delta)
+{
+    if ((delta + (int)mOpenCount) < 0) {
+        ALOGW("%s invalid delta %d, open count %d",
+              __FUNCTION__, delta, mOpenCount);
+        mOpenCount = (uint32_t)(-delta);
+    }
+    mOpenCount += delta;
+    ALOGV("%s open count %d", __FUNCTION__, mOpenCount);
+    return mOpenCount;
+}
+
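+// Adjusts the active count and, on an idle<->active transition, notifies the dynamic
+// policy mix (if any) and the recording configuration listeners.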
+uint32_t AudioSession::changeActiveCount(int delta)
+{
+    const uint32_t oldActiveCount = mActiveCount;
+    if ((delta + (int)mActiveCount) < 0) {
+        ALOGW("%s invalid delta %d, active count %d",
+              __FUNCTION__, delta, mActiveCount);
+        mActiveCount = (uint32_t)(-delta);
+    }
+    mActiveCount += delta;
+    ALOGV("%s active count %d", __FUNCTION__, mActiveCount);
+    int event = RECORD_CONFIG_EVENT_NONE;
+
+    if ((oldActiveCount == 0) && (mActiveCount > 0)) {
+        event = RECORD_CONFIG_EVENT_START;
+    } else if ((oldActiveCount > 0) && (mActiveCount == 0)) {
+        event = RECORD_CONFIG_EVENT_STOP;
+    }
+
+    if (event != RECORD_CONFIG_EVENT_NONE) {
+        // Dynamic policy callback:
+        // if input maps to a dynamic policy with an activity listener, notify of state change
+        if ((mPolicyMix != NULL)
+                && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+                    (event == RECORD_CONFIG_EVENT_START) ? MIX_STATE_MIXING : MIX_STATE_IDLE);
+        }
+
+        // Recording configuration callback:
+        const AudioSessionInfoProvider* provider = mInfoProvider;
+        const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
+                AUDIO_CONFIG_BASE_INITIALIZER;
+        const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
+                AUDIO_PATCH_HANDLE_NONE;
+        mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
+                &mConfig, &deviceConfig, patchHandle);
+    }
+
+    return mActiveCount;
+}
+
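+// Two sessions match only when every attribute (id, source, format, rate, mask,
+// flags, uid) is identical.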
+bool AudioSession::matches(const sp<AudioSession> &other) const
+{
+    if (other->session() == mSession &&
+        other->inputSource() == mInputSource &&
+        other->format() == mConfig.format &&
+        other->sampleRate() == mConfig.sample_rate &&
+        other->channelMask() == mConfig.channel_mask &&
+        other->flags() == mFlags &&
+        other->uid() == mUid) {
+        return true;
+    }
+    return false;
+}
+
+void AudioSession::setInfoProvider(AudioSessionInfoProvider *provider)
+{
+    mInfoProvider = provider;
+}
+
+void AudioSession::onSessionInfoUpdate() const
+{
+    if (mActiveCount > 0) {
+        // resend the callback after re-querying the information from the info provider
+        const AudioSessionInfoProvider* provider = mInfoProvider;
+        const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
+                AUDIO_CONFIG_BASE_INITIALIZER;
+        const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
+                AUDIO_PATCH_HANDLE_NONE;
+        mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
+                mSession, mInputSource,
+                &mConfig, &deviceConfig, patchHandle);
+    }
+}
+
+status_t AudioSession::dump(int fd, int spaces, int index) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%*sAudio session %d:\n", spaces, "", index+1);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- session: %2d\n", spaces, "", mSession);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- owner uid: %2d\n", spaces, "", mUid);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- input source: %d\n", spaces, "", mInputSource);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- format: %08x\n", spaces, "", mConfig.format);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- sample: %d\n", spaces, "", mConfig.sample_rate);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- channel mask: %08x\n",
+             spaces, "", mConfig.channel_mask);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- is soundtrigger: %s\n",
+             spaces, "", mIsSoundTrigger ? "true" : "false");
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- open count: %d\n", spaces, "", mOpenCount);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- active count: %d\n", spaces, "", mActiveCount);
+    result.append(buffer);
+
+    write(fd, result.string(), result.size());
+    return NO_ERROR;
+}
+
+status_t AudioSessionCollection::addSession(audio_session_t session,
+                                         const sp<AudioSession>& audioSession,
+                                         AudioSessionInfoProvider *provider)
+{
+    ssize_t index = indexOfKey(session);
+
+    if (index >= 0) {
+        ALOGW("addSession() session %d already in", session);
+        return ALREADY_EXISTS;
+    }
+    audioSession->setInfoProvider(provider);
+    add(session, audioSession);
+    ALOGV("addSession() session %d  client %d source %d",
+            session, audioSession->uid(), audioSession->inputSource());
+    return NO_ERROR;
+}
+
+status_t AudioSessionCollection::removeSession(audio_session_t session)
+{
+    ssize_t index = indexOfKey(session);
+
+    if (index < 0) {
+        ALOGW("removeSession() session %d not in", session);
+        return ALREADY_EXISTS;
+    }
+    ALOGV("removeSession() session %d", session);
+    valueAt(index)->setInfoProvider(NULL);
+    removeItemsAt(index);
+    return NO_ERROR;
+}
+
+uint32_t AudioSessionCollection::getOpenCount() const
+{
+    uint32_t openCount = 0;
+    for (size_t i = 0; i < size(); i++) {
+        openCount += valueAt(i)->openCount();
+    }
+    return openCount;
+}
+
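+// Returns the subset of sessions whose active count is non-zero.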
+AudioSessionCollection AudioSessionCollection::getActiveSessions() const
+{
+    AudioSessionCollection activeSessions;
+    for (size_t i = 0; i < size(); i++) {
+        if (valueAt(i)->activeCount() != 0) {
+            activeSessions.add(valueAt(i)->session(), valueAt(i));
+        }
+    }
+    return activeSessions;
+}
+
+bool AudioSessionCollection::hasActiveSession() const
+{
+    return getActiveSessions().size() != 0;
+}
+
+bool AudioSessionCollection::isSourceActive(audio_source_t source) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioSession>  audioSession = valueAt(i);
+        // AUDIO_SOURCE_HOTWORD is equivalent to AUDIO_SOURCE_VOICE_RECOGNITION only if it
+        // corresponds to an active capture triggered by a hardware hotword recognition
+        if (audioSession->activeCount() > 0 &&
+                ((audioSession->inputSource() == source) ||
+                ((source == AUDIO_SOURCE_VOICE_RECOGNITION) &&
+                 (audioSession->inputSource() == AUDIO_SOURCE_HOTWORD) &&
+                 audioSession->isSoundTrigger()))) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void AudioSessionCollection::onSessionInfoUpdate() const
+{
+    for (size_t i = 0; i < size(); i++) {
+        valueAt(i)->onSessionInfoUpdate();
+    }
+}
+
+status_t AudioSessionCollection::dump(int fd, int spaces) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    snprintf(buffer, SIZE, "%*sAudio Sessions:\n", spaces, "");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        valueAt(i)->dump(fd, spaces + 2, i);
+    }
+    return NO_ERROR;
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp
new file mode 100644
index 0000000..ba33e57
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSourceDescriptor.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioSourceDescriptor"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include <media/AudioPolicyHelper.h>
+#include <HwModule.h>
+#include <AudioGain.h>
+#include <AudioSourceDescriptor.h>
+#include <DeviceDescriptor.h>
+#include <IOProfile.h>
+#include <AudioOutputDescriptor.h>
+
+namespace android {
+
+status_t AudioSourceDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "mStream: %d\n", audio_attributes_to_stream_type(&mAttributes));
+    result.append(buffer);
+    snprintf(buffer, SIZE, "mDevice:\n");
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+    mDevice->dump(fd, 2, 0);
+    return NO_ERROR;
+}
+
+
+status_t AudioSourceCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nAudio sources dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- Source %d dump:\n", keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        valueAt(i)->dump(fd);
+    }
+
+    return NO_ERROR;
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
index 89ef045..a3536e5 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -18,139 +18,230 @@
 //#define LOG_NDEBUG 0
 
 #include "ConfigParsingUtils.h"
+#include <convert/convert.h>
 #include "AudioGain.h"
+#include "IOProfile.h"
+#include "TypeConverter.h"
 #include <hardware/audio.h>
 #include <utils/Log.h>
 #include <cutils/misc.h>
 
 namespace android {
 
-//static
-uint32_t ConfigParsingUtils::stringToEnum(const struct StringToEnum *table,
-                                              size_t size,
-                                              const char *name)
-{
-    for (size_t i = 0; i < size; i++) {
-        if (strcmp(table[i].name, name) == 0) {
-            ALOGV("stringToEnum() found %s", table[i].name);
-            return table[i].value;
-        }
-    }
-    return 0;
-}
-
-//static
-const char *ConfigParsingUtils::enumToString(const struct StringToEnum *table,
-                                              size_t size,
-                                              uint32_t value)
-{
-    for (size_t i = 0; i < size; i++) {
-        if (table[i].value == value) {
-            return table[i].name;
-        }
-    }
-    return "";
-}
-
-//static
-bool ConfigParsingUtils::stringToBool(const char *value)
-{
-    return ((strcasecmp("true", value) == 0) || (strcmp("1", value) == 0));
-}
-
-
 // --- audio_policy.conf file parsing
-//static
-uint32_t ConfigParsingUtils::parseOutputFlagNames(char *name)
-{
-    uint32_t flag = 0;
 
-    // it is OK to cast name to non const here as we are not going to use it after
-    // strtok() modifies it
-    char *flagName = strtok(name, "|");
-    while (flagName != NULL) {
-        if (strlen(flagName) != 0) {
-            flag |= ConfigParsingUtils::stringToEnum(sOutputFlagNameToEnumTable,
-                               ARRAY_SIZE(sOutputFlagNameToEnumTable),
-                               flagName);
+//static
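+// Parses a single gain configuration node; a gain whose mode could not be resolved
+// is discarded.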
+void ConfigParsingUtils::loadAudioPortGain(cnode *root, AudioPort &audioPort, int index)
+{
+    cnode *node = root->first_child;
+
+    sp<AudioGain> gain = new AudioGain(index, audioPort.useInputChannelMask());
+
+    while (node) {
+        if (strcmp(node->name, GAIN_MODE) == 0) {
+            gain->setMode(GainModeConverter::maskFromString(node->value));
+        } else if (strcmp(node->name, GAIN_CHANNELS) == 0) {
+            audio_channel_mask_t mask;
+            if (audioPort.useInputChannelMask()) {
+                if (InputChannelConverter::fromString(node->value, mask)) {
+                    gain->setChannelMask(mask);
+                }
+            } else {
+                if (OutputChannelConverter::fromString(node->value, mask)) {
+                    gain->setChannelMask(mask);
+                }
+            }
+        } else if (strcmp(node->name, GAIN_MIN_VALUE) == 0) {
+            gain->setMinValueInMb(atoi(node->value));
+        } else if (strcmp(node->name, GAIN_MAX_VALUE) == 0) {
+            gain->setMaxValueInMb(atoi(node->value));
+        } else if (strcmp(node->name, GAIN_DEFAULT_VALUE) == 0) {
+            gain->setDefaultValueInMb(atoi(node->value));
+        } else if (strcmp(node->name, GAIN_STEP_VALUE) == 0) {
+            gain->setStepValueInMb(atoi(node->value));
+        } else if (strcmp(node->name, GAIN_MIN_RAMP_MS) == 0) {
+            gain->setMinRampInMs(atoi(node->value));
+        } else if (strcmp(node->name, GAIN_MAX_RAMP_MS) == 0) {
+            gain->setMaxRampInMs(atoi(node->value));
         }
-        flagName = strtok(NULL, "|");
-    }
-    //force direct flag if offload flag is set: offloading implies a direct output stream
-    // and all common behaviors are driven by checking only the direct flag
-    // this should normally be set appropriately in the policy configuration file
-    if ((flag & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
-        flag |= AUDIO_OUTPUT_FLAG_DIRECT;
+        node = node->next;
     }
 
-    return flag;
+    ALOGV("loadGain() adding new gain mode %08x channel mask %08x min mB %d max mB %d",
+          gain->getMode(), gain->getChannelMask(), gain->getMinValueInMb(),
+          gain->getMaxValueInMb());
+
+    if (gain->getMode() == 0) {
+        return;
+    }
+    audioPort.mGains.add(gain);
+}
+
+void ConfigParsingUtils::loadAudioPortGains(cnode *root, AudioPort &audioPort)
+{
+    cnode *node = root->first_child;
+    int index = 0;
+    while (node) {
+        ALOGV("loadGains() loading gain %s", node->name);
+        loadAudioPortGain(node, audioPort, index++);
+        node = node->next;
+    }
 }
 
 //static
-uint32_t ConfigParsingUtils::parseInputFlagNames(char *name)
+void ConfigParsingUtils::loadDeviceDescriptorGains(cnode *root, sp<DeviceDescriptor> &deviceDesc)
 {
-    uint32_t flag = 0;
+    loadAudioPortGains(root, *deviceDesc);
+    if (deviceDesc->mGains.size() > 0) {
+        deviceDesc->mGains[0]->getDefaultConfig(&deviceDesc->mGain);
+    }
+}
 
-    // it is OK to cast name to non const here as we are not going to use it after
-    // strtok() modifies it
-    char *flagName = strtok(name, "|");
-    while (flagName != NULL) {
-        if (strlen(flagName) != 0) {
-            flag |= stringToEnum(sInputFlagNameToEnumTable,
-                               ARRAY_SIZE(sInputFlagNameToEnumTable),
-                               flagName);
+//static
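+// Parses a device definition node: a valid device type is mandatory, while the
+// address, channel masks and gains are optional.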
+status_t ConfigParsingUtils::loadHwModuleDevice(cnode *root, DeviceVector &devices)
+{
+    cnode *node = root->first_child;
+
+    audio_devices_t type = AUDIO_DEVICE_NONE;
+    while (node) {
+        if (strcmp(node->name, APM_DEVICE_TYPE) == 0) {
+            DeviceConverter::fromString(node->value, type);
+            break;
         }
-        flagName = strtok(NULL, "|");
+        node = node->next;
     }
-    return flag;
+    if (type == AUDIO_DEVICE_NONE ||
+            (!audio_is_input_device(type) && !audio_is_output_device(type))) {
+        ALOGW("loadDevice() bad type %08x", type);
+        return BAD_VALUE;
+    }
+    sp<DeviceDescriptor> deviceDesc = new DeviceDescriptor(type, String8(root->name));
+
+    node = root->first_child;
+    while (node) {
+        if (strcmp(node->name, APM_DEVICE_ADDRESS) == 0) {
+            deviceDesc->mAddress = String8((char *)node->value);
+        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
+            if (audio_is_input_device(type)) {
+                deviceDesc->addAudioProfile(
+                        new AudioProfile(gDynamicFormat,
+                                         inputChannelMasksFromString(node->value),
+                                         SampleRateVector()));
+            } else {
+                deviceDesc->addAudioProfile(
+                        new AudioProfile(gDynamicFormat,
+                                         outputChannelMasksFromString(node->value),
+                                         SampleRateVector()));
+            }
+        } else if (strcmp(node->name, GAINS_TAG) == 0) {
+            loadDeviceDescriptorGains(node, deviceDesc);
+        }
+        node = node->next;
+    }
+
+    ALOGV("loadDevice() adding device tag (literal type) %s type %08x address %s",
+          deviceDesc->getTagName().string(), type, deviceDesc->mAddress.string());
+
+    devices.add(deviceDesc);
+    return NO_ERROR;
 }
 
 //static
-audio_devices_t ConfigParsingUtils::parseDeviceNames(char *name)
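+// Builds an IOProfile from a config node; the sink role maps to an input (capture)
+// profile and the source role to an output (playback) profile, with dynamic values
+// substituted for any unspecified collection.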
+status_t ConfigParsingUtils::loadHwModuleProfile(cnode *root, sp<HwModule> &module,
+                                                 audio_port_role_t role)
 {
-    uint32_t device = 0;
+    cnode *node = root->first_child;
 
-    char *devName = strtok(name, "|");
-    while (devName != NULL) {
-        if (strlen(devName) != 0) {
-            device |= stringToEnum(sDeviceTypeToEnumTable,
-                                 ARRAY_SIZE(sDeviceTypeToEnumTable),
-                                 devName);
-         }
-        devName = strtok(NULL, "|");
-     }
-    return device;
+    sp<IOProfile> profile = new IOProfile(String8(root->name), role);
+
+    AudioProfileVector audioProfiles;
+    SampleRateVector sampleRates;
+    ChannelsVector channels;
+    FormatVector formats;
+
+    while (node) {
+        if (strcmp(node->name, FORMATS_TAG) == 0 &&
+                strcmp(node->value, DYNAMIC_VALUE_TAG) != 0) {
+            formats = formatsFromString(node->value);
+        } else if (strcmp(node->name, SAMPLING_RATES_TAG) == 0 &&
+                  strcmp(node->value, DYNAMIC_VALUE_TAG) != 0) {
+            collectionFromString<SampleRateTraits>(node->value, sampleRates);
+        } else if (strcmp(node->name, CHANNELS_TAG) == 0 &&
+                   strcmp(node->value, DYNAMIC_VALUE_TAG) != 0) {
+            if (role == AUDIO_PORT_ROLE_SINK) {
+                channels = inputChannelMasksFromString(node->value);
+            } else {
+                channels = outputChannelMasksFromString(node->value);
+            }
+        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
+            DeviceVector devices;
+            loadDevicesFromTag(node->value, devices, module->getDeclaredDevices());
+            profile->setSupportedDevices(devices);
+        } else if (strcmp(node->name, FLAGS_TAG) == 0) {
+            if (role == AUDIO_PORT_ROLE_SINK) {
+                profile->setFlags(InputFlagConverter::maskFromString(node->value));
+            } else {
+                profile->setFlags(OutputFlagConverter::maskFromString(node->value));
+            }
+        } else if (strcmp(node->name, GAINS_TAG) == 0) {
+            loadAudioPortGains(node, *profile);
+        }
+        node = node->next;
+    }
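+    // With no declared formats, build a single fully dynamic profile whose format,
+    // channels and rates are resolved later.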
+    if (formats.isEmpty()) {
+        sp<AudioProfile> profileToAdd = new AudioProfile(gDynamicFormat, channels, sampleRates);
+        profileToAdd->setDynamicFormat(true);
+        profileToAdd->setDynamicChannels(channels.isEmpty());
+        profileToAdd->setDynamicRate(sampleRates.isEmpty());
+        audioProfiles.add(profileToAdd);
+    } else {
+        for (size_t i = 0; i < formats.size(); i++) {
+            // For compatibility reasons, create one profile per format, each with the
+            // same collection of rates and channel masks.
+            sp<AudioProfile> profileToAdd = new AudioProfile(formats[i], channels, sampleRates);
+            profileToAdd->setDynamicFormat(formats[i] == gDynamicFormat);
+            profileToAdd->setDynamicChannels(channels.isEmpty());
+            profileToAdd->setDynamicRate(sampleRates.isEmpty());
+            audioProfiles.add(profileToAdd);
+        }
+    }
+    profile->setAudioProfiles(audioProfiles);
+    ALOGW_IF(!profile->hasSupportedDevices(), "load%s() invalid supported devices",
+             role == AUDIO_PORT_ROLE_SINK ? "Input" : "Output");
+    if (profile->hasSupportedDevices()) {
+        ALOGV("load%s() adding Supported Devices %04x, mFlags %04x",
+              role == AUDIO_PORT_ROLE_SINK ? "Input" : "Output",
+              profile->getSupportedDevicesType(), profile->getFlags());
+        return module->addProfile(profile);
+    }
+    return BAD_VALUE;
 }
 
 //static
-void ConfigParsingUtils::loadHwModule(cnode *root, HwModuleCollection &hwModules,
-                                      DeviceVector &availableInputDevices,
-                                      DeviceVector &availableOutputDevices,
-                                      sp<DeviceDescriptor> &defaultOutputDevices,
-                                      bool &isSpeakerDrcEnable)
+status_t ConfigParsingUtils::loadHwModule(cnode *root, sp<HwModule> &module,
+                                          AudioPolicyConfig &config)
 {
     status_t status = NAME_NOT_FOUND;
-    cnode *node;
-    sp<HwModule> module = new HwModule(root->name);
-
-    node = config_find(root, DEVICES_TAG);
+    cnode *node = config_find(root, DEVICES_TAG);
     if (node != NULL) {
         node = node->first_child;
+        DeviceVector devices;
         while (node) {
             ALOGV("loadHwModule() loading device %s", node->name);
-            status_t tmpStatus = module->loadDevice(node);
+            status_t tmpStatus = loadHwModuleDevice(node, devices);
             if (status == NAME_NOT_FOUND || status == NO_ERROR) {
                 status = tmpStatus;
             }
             node = node->next;
         }
+        module->setDeclaredDevices(devices);
     }
     node = config_find(root, OUTPUTS_TAG);
     if (node != NULL) {
         node = node->first_child;
         while (node) {
             ALOGV("loadHwModule() loading output %s", node->name);
-            status_t tmpStatus = module->loadOutput(node);
+            status_t tmpStatus = loadHwModuleProfile(node, module, AUDIO_PORT_ROLE_SOURCE);
             if (status == NAME_NOT_FOUND || status == NO_ERROR) {
                 status = tmpStatus;
             }
@@ -162,27 +253,20 @@
         node = node->first_child;
         while (node) {
             ALOGV("loadHwModule() loading input %s", node->name);
-            status_t tmpStatus = module->loadInput(node);
+            status_t tmpStatus = loadHwModuleProfile(node, module, AUDIO_PORT_ROLE_SINK);
             if (status == NAME_NOT_FOUND || status == NO_ERROR) {
                 status = tmpStatus;
             }
             node = node->next;
         }
     }
-    loadGlobalConfig(root, module, availableInputDevices, availableOutputDevices,
-                     defaultOutputDevices, isSpeakerDrcEnable);
-
-    if (status == NO_ERROR) {
-        hwModules.add(module);
-    }
+    loadModuleGlobalConfig(root, module, config);
+    return status;
 }
 
 //static
 void ConfigParsingUtils::loadHwModules(cnode *root, HwModuleCollection &hwModules,
-                                       DeviceVector &availableInputDevices,
-                                       DeviceVector &availableOutputDevices,
-                                       sp<DeviceDescriptor> &defaultOutputDevices,
-                                       bool &isSpeakerDrcEnabled)
+                                       AudioPolicyConfig &config)
 {
     cnode *node = config_find(root, AUDIO_HW_MODULE_TAG);
     if (node == NULL) {
@@ -192,18 +276,49 @@
     node = node->first_child;
     while (node) {
         ALOGV("loadHwModules() loading module %s", node->name);
-        loadHwModule(node, hwModules, availableInputDevices, availableOutputDevices,
-                     defaultOutputDevices, isSpeakerDrcEnabled);
+        sp<HwModule> module = new HwModule(node->name);
+        if (loadHwModule(node, module, config) == NO_ERROR) {
+            hwModules.add(module);
+        }
         node = node->next;
     }
 }
 
 //static
-void ConfigParsingUtils::loadGlobalConfig(cnode *root, const sp<HwModule>& module,
-                                          DeviceVector &availableInputDevices,
-                                          DeviceVector &availableOutputDevices,
-                                          sp<DeviceDescriptor> &defaultOutputDevice,
-                                          bool &speakerDrcEnabled)
+void ConfigParsingUtils::loadDevicesFromTag(const char *tag, DeviceVector &devices,
+                                            const DeviceVector &declaredDevices)
+{
+    char *tagLiteral = strdup(tag); // strtok() modifies its argument, so work on a copy
+    char *devTag = strtok(tagLiteral, "|");
+    while (devTag != NULL) {
+        if (strlen(devTag) != 0) {
+            audio_devices_t type;
+            if (DeviceConverter::fromString(devTag, type)) {
+                uint32_t inBit = type & AUDIO_DEVICE_BIT_IN;
+                type &= ~AUDIO_DEVICE_BIT_IN;
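+                // Expand a composite device mask into one descriptor per single-bit
+                // type, re-applying the direction bit to each.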
+                while (type) {
+                    audio_devices_t singleType =
+                            inBit | (1 << (31 - __builtin_clz(type)));
+                    type &= ~singleType;
+                    sp<DeviceDescriptor> dev = new DeviceDescriptor(singleType);
+                    devices.add(dev);
+                }
+            } else {
+                sp<DeviceDescriptor> deviceDesc =
+                        declaredDevices.getDeviceFromTagName(String8(devTag));
+                if (deviceDesc != 0) {
+                    devices.add(deviceDesc);
+                }
+            }
+        }
+        devTag = strtok(NULL, "|");
+    }
+    free(tagLiteral);
+}
+
+//static
+void ConfigParsingUtils::loadModuleGlobalConfig(cnode *root, const sp<HwModule> &module,
+                                                AudioPolicyConfig &config)
 {
     cnode *node = config_find(root, GLOBAL_CONFIG_TAG);
 
@@ -212,52 +327,68 @@
     }
     DeviceVector declaredDevices;
     if (module != NULL) {
-        declaredDevices = module->mDeclaredDevices;
+        declaredDevices = module->getDeclaredDevices();
     }
 
     node = node->first_child;
     while (node) {
         if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) {
-            availableOutputDevices.loadDevicesFromTag((char *)node->value,
-                                                        declaredDevices);
+            DeviceVector availableOutputDevices;
+            loadDevicesFromTag(node->value, availableOutputDevices, declaredDevices);
             ALOGV("loadGlobalConfig() Attached Output Devices %08x",
                   availableOutputDevices.types());
+            config.addAvailableOutputDevices(availableOutputDevices);
         } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
-            audio_devices_t device = (audio_devices_t)stringToEnum(
-                    sDeviceTypeToEnumTable,
-                    ARRAY_SIZE(sDeviceTypeToEnumTable),
-                    (char *)node->value);
+            audio_devices_t device = AUDIO_DEVICE_NONE;
+            DeviceConverter::fromString(node->value, device);
             if (device != AUDIO_DEVICE_NONE) {
-                defaultOutputDevice = new DeviceDescriptor(device);
+                sp<DeviceDescriptor> defaultOutputDevice = new DeviceDescriptor(device);
+                config.setDefaultOutputDevice(defaultOutputDevice);
+                ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", defaultOutputDevice->type());
             } else {
                 ALOGW("loadGlobalConfig() default device not specified");
             }
-            ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", defaultOutputDevice->type());
         } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) {
-            availableInputDevices.loadDevicesFromTag((char *)node->value,
-                                                       declaredDevices);
+            DeviceVector availableInputDevices;
+            loadDevicesFromTag(node->value, availableInputDevices, declaredDevices);
             ALOGV("loadGlobalConfig() Available InputDevices %08x", availableInputDevices.types());
-        } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
-            speakerDrcEnabled = stringToBool((char *)node->value);
-            ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", speakerDrcEnabled);
+            config.addAvailableInputDevices(availableInputDevices);
         } else if (strcmp(AUDIO_HAL_VERSION_TAG, node->name) == 0) {
             uint32_t major, minor;
             sscanf((char *)node->value, "%u.%u", &major, &minor);
-            module->mHalVersion = HARDWARE_DEVICE_API_VERSION(major, minor);
+            module->setHalVersion(HARDWARE_DEVICE_API_VERSION(major, minor));
             ALOGV("loadGlobalConfig() mHalVersion = %04x major %u minor %u",
-                  module->mHalVersion, major, minor);
+                  module->getHalVersion(), major, minor);
         }
         node = node->next;
     }
 }
 
 //static
-status_t ConfigParsingUtils::loadAudioPolicyConfig(const char *path,
-                                                   HwModuleCollection &hwModules,
-                                                   DeviceVector &availableInputDevices,
-                                                   DeviceVector &availableOutputDevices,
-                                                   sp<DeviceDescriptor> &defaultOutputDevices,
-                                                   bool &isSpeakerDrcEnabled)
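+// Parses global configuration entries that are not tied to a specific module (speaker
+// DRC) and delegates module-scoped entries to loadModuleGlobalConfig() on the primary.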
+void ConfigParsingUtils::loadGlobalConfig(cnode *root, AudioPolicyConfig &config,
+                                          const sp<HwModule>& primaryModule)
+{
+    cnode *node = config_find(root, GLOBAL_CONFIG_TAG);
+
+    if (node == NULL) {
+        return;
+    }
+    node = node->first_child;
+    while (node) {
+        if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
+            bool speakerDrcEnabled;
+            if (utilities::convertTo<std::string, bool>(node->value, speakerDrcEnabled)) {
+                ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", speakerDrcEnabled);
+                config.setSpeakerDrcEnabled(speakerDrcEnabled);
+            }
+        }
+        node = node->next;
+    }
+    loadModuleGlobalConfig(root, primaryModule, config);
+}
+
+//static
+status_t ConfigParsingUtils::loadConfig(const char *path, AudioPolicyConfig &config)
 {
     cnode *root;
     char *data;
@@ -269,13 +400,14 @@
     root = config_node("", "");
     config_load(root, data);
 
-    loadHwModules(root, hwModules,
-                  availableInputDevices, availableOutputDevices,
-                  defaultOutputDevices, isSpeakerDrcEnabled);
-    // legacy audio_policy.conf files have one global_configuration section
-    loadGlobalConfig(root, hwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY),
-                     availableInputDevices, availableOutputDevices,
-                     defaultOutputDevices, isSpeakerDrcEnabled);
+    HwModuleCollection hwModules;
+    loadHwModules(root, hwModules, config);
+
+    // legacy audio_policy.conf files have one global_configuration section, attached to primary.
+    loadGlobalConfig(root, config, hwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY));
+
+    config.setHwModules(hwModules);
+
     config_free(root);
     free(root);
     free(data);
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 1f1fca3..35f078e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -18,19 +18,21 @@
 //#define LOG_NDEBUG 0
 
 #include "DeviceDescriptor.h"
+#include "TypeConverter.h"
 #include "AudioGain.h"
 #include "HwModule.h"
-#include "ConfigParsingUtils.h"
 
 namespace android {
 
-DeviceDescriptor::DeviceDescriptor(audio_devices_t type) :
+DeviceDescriptor::DeviceDescriptor(audio_devices_t type, const String8 &tagName) :
     AudioPort(String8(""), AUDIO_PORT_TYPE_DEVICE,
               audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
                                              AUDIO_PORT_ROLE_SOURCE),
-    mTag(""), mAddress(""), mDeviceType(type), mId(0)
+    mAddress(""), mTagName(tagName), mDeviceType(type), mId(0)
 {
-
+    if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX || type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
+        mAddress = String8("0");
+    }
 }
 
 audio_port_handle_t DeviceDescriptor::getId() const
@@ -48,23 +50,11 @@
 {
     // Devices are considered equal if they:
     // - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
-    // - have the same address or one device does not specify the address
-    // - have the same channel mask or one device does not specify the channel mask
+    // - have the same address
     if (other == 0) {
         return false;
     }
-    return (mDeviceType == other->mDeviceType) &&
-           (mAddress == "" || other->mAddress == "" || mAddress == other->mAddress) &&
-           (mChannelMask == 0 || other->mChannelMask == 0 ||
-                mChannelMask == other->mChannelMask);
-}
-
-void DeviceDescriptor::loadGains(cnode *root)
-{
-    AudioPort::loadGains(root);
-    if (mGains.size() > 0) {
-        mGains[0]->getDefaultConfig(&mGain);
-    }
+    return (mDeviceType == other->mDeviceType) && (mAddress == other->mAddress);
 }
 
 void DeviceVector::refreshTypes()
@@ -86,6 +76,16 @@
     return -1;
 }
 
+void DeviceVector::add(const DeviceVector &devices)
+{
+    for (size_t i = 0; i < devices.size(); i++) {
+        sp<DeviceDescriptor> device = devices.itemAt(i);
+        if (indexOf(device) < 0 && SortedVector::add(device) >= 0) {
+            refreshTypes();
+        }
+    }
+}
+
 ssize_t DeviceVector::add(const sp<DeviceDescriptor>& item)
 {
     ssize_t ret = indexOf(item);
@@ -129,49 +129,6 @@
     return devices;
 }
 
-void DeviceVector::loadDevicesFromType(audio_devices_t types)
-{
-    DeviceVector deviceList;
-
-    uint32_t role_bit = AUDIO_DEVICE_BIT_IN & types;
-    types &= ~role_bit;
-
-    while (types) {
-        uint32_t i = 31 - __builtin_clz(types);
-        uint32_t type = 1 << i;
-        types &= ~type;
-        add(new DeviceDescriptor(type | role_bit));
-    }
-}
-
-void DeviceVector::loadDevicesFromTag(char *tag,
-                                       const DeviceVector& declaredDevices)
-{
-    char *devTag = strtok(tag, "|");
-    while (devTag != NULL) {
-        if (strlen(devTag) != 0) {
-            audio_devices_t type = ConfigParsingUtils::stringToEnum(sDeviceTypeToEnumTable,
-                                 ARRAY_SIZE(sDeviceTypeToEnumTable),
-                                 devTag);
-            if (type != AUDIO_DEVICE_NONE) {
-                sp<DeviceDescriptor> dev = new DeviceDescriptor(type);
-                if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX ||
-                        type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
-                    dev->mAddress = String8("0");
-                }
-                add(dev);
-            } else {
-                sp<DeviceDescriptor> deviceDesc =
-                        declaredDevices.getDeviceFromTag(String8(devTag));
-                if (deviceDesc != 0) {
-                    add(deviceDesc);
-                }
-            }
-         }
-         devTag = strtok(NULL, "|");
-     }
-}
-
 sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, String8 address) const
 {
     sp<DeviceDescriptor> device;
@@ -234,11 +191,11 @@
     return devices;
 }
 
-sp<DeviceDescriptor> DeviceVector::getDeviceFromTag(const String8& tag) const
+sp<DeviceDescriptor> DeviceVector::getDeviceFromTagName(const String8 &tagName) const
 {
     sp<DeviceDescriptor> device;
     for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->mTag == tag) {
+        if (itemAt(i)->getTagName() == tagName) {
             device = itemAt(i);
             break;
         }
@@ -246,30 +203,36 @@
     return device;
 }
 
-
-status_t DeviceVector::dump(int fd, const String8 &direction) const
+status_t DeviceVector::dump(int fd, const String8 &tag, int spaces, bool verbose) const
 {
+    if (isEmpty()) {
+        return NO_ERROR;
+    }
     const size_t SIZE = 256;
     char buffer[SIZE];
 
-    snprintf(buffer, SIZE, "\n Available %s devices:\n", direction.string());
+    snprintf(buffer, SIZE, "%*s- %s devices:\n", spaces, "", tag.string());
     write(fd, buffer, strlen(buffer));
     for (size_t i = 0; i < size(); i++) {
-        itemAt(i)->dump(fd, 2, i);
+        itemAt(i)->dump(fd, spaces + 2, i, verbose);
     }
     return NO_ERROR;
 }
 
-audio_policy_dev_state_t DeviceVector::getDeviceConnectionState(const sp<DeviceDescriptor> &devDesc) const
-{
-    ssize_t index = indexOf(devDesc);
-    return index >= 0 ? AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
-}
-
 void DeviceDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
                                          const struct audio_port_config *srcConfig) const
 {
-    dstConfig->config_mask = AUDIO_PORT_CONFIG_CHANNEL_MASK|AUDIO_PORT_CONFIG_GAIN;
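+    // Advertise only the fields holding valid values; gain is always reported.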
+    dstConfig->config_mask = AUDIO_PORT_CONFIG_GAIN;
+    if (mSamplingRate != 0) {
+        dstConfig->config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
+    }
+    if (mChannelMask != AUDIO_CHANNEL_NONE) {
+        dstConfig->config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
+    }
+    if (mFormat != AUDIO_FORMAT_INVALID) {
+        dstConfig->config_mask |= AUDIO_PORT_CONFIG_FORMAT;
+    }
+
     if (srcConfig != NULL) {
         dstConfig->config_mask |= srcConfig->config_mask;
     }
@@ -286,7 +249,7 @@
     // without the test?
     // This has been demonstrated to NOT be true (at start up)
     // ALOG_ASSERT(mModule != NULL);
-    dstConfig->ext.device.hw_module = mModule != 0 ? mModule->mHandle : AUDIO_IO_HANDLE_NONE;
+    dstConfig->ext.device.hw_module = mModule != 0 ? mModule->mHandle : AUDIO_MODULE_HANDLE_NONE;
     strncpy(dstConfig->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
 }
 
@@ -303,12 +266,10 @@
 
 void DeviceDescriptor::importAudioPort(const sp<AudioPort> port) {
     AudioPort::importAudioPort(port);
-    mSamplingRate = port->pickSamplingRate();
-    mFormat = port->pickFormat();
-    mChannelMask = port->pickChannelMask();
+    port->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
 }
 
-status_t DeviceDescriptor::dump(int fd, int spaces, int index) const
+status_t DeviceDescriptor::dump(int fd, int spaces, int index, bool verbose) const
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -320,28 +281,30 @@
         snprintf(buffer, SIZE, "%*s- id: %2d\n", spaces, "", mId);
         result.append(buffer);
     }
-    snprintf(buffer, SIZE, "%*s- type: %-48s\n", spaces, "",
-            ConfigParsingUtils::enumToString(sDeviceTypeToEnumTable,
-                    ARRAY_SIZE(sDeviceTypeToEnumTable),
-                    mDeviceType));
-    result.append(buffer);
+    if (!mTagName.isEmpty()) {
+        snprintf(buffer, SIZE, "%*s- tag name: %s\n", spaces, "", mTagName.string());
+        result.append(buffer);
+    }
+    std::string deviceLiteral;
+    if (DeviceConverter::toString(mDeviceType, deviceLiteral)) {
+        snprintf(buffer, SIZE, "%*s- type: %-48s\n", spaces, "", deviceLiteral.c_str());
+        result.append(buffer);
+    }
     if (mAddress.size() != 0) {
         snprintf(buffer, SIZE, "%*s- address: %-32s\n", spaces, "", mAddress.string());
         result.append(buffer);
     }
     write(fd, result.string(), result.size());
-    AudioPort::dump(fd, spaces);
+    AudioPort::dump(fd, spaces, verbose);
 
     return NO_ERROR;
 }
 
 void DeviceDescriptor::log() const
 {
-    ALOGI("Device id:%d type:0x%X:%s, addr:%s",
-          mId,
-          mDeviceType,
-          ConfigParsingUtils::enumToString(
-             sDeviceNameToEnumTable, ARRAY_SIZE(sDeviceNameToEnumTable), mDeviceType),
+    std::string device;
+    DeviceConverter::toString(mDeviceType, device);
+    ALOGI("Device id:%d type:0x%X:%s, addr:%s", mId,  mDeviceType, device.c_str(),
           mAddress.string());
 
     AudioPort::log("  ");
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 33d838d..7b2341e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -45,7 +45,8 @@
 
 EffectDescriptorCollection::EffectDescriptorCollection() :
     mTotalEffectsCpuLoad(0),
-    mTotalEffectsMemory(0)
+    mTotalEffectsMemory(0),
+    mTotalEffectsMemoryMaxUsed(0)
 {
 
 }
@@ -62,6 +63,9 @@
         return INVALID_OPERATION;
     }
     mTotalEffectsMemory += desc->memoryUsage;
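+    // Remember the high-water mark so the dump can report peak effects memory usage.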
+    if (mTotalEffectsMemory > mTotalEffectsMemoryMaxUsed) {
+        mTotalEffectsMemoryMaxUsed = mTotalEffectsMemory;
+    }
     ALOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d",
             desc->name, io, strategy, session, id);
     ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory);
@@ -175,8 +179,9 @@
     const size_t SIZE = 256;
     char buffer[SIZE];
 
-    snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n",
-             (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory);
+    snprintf(buffer, SIZE,
+            "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB, Max memory used: %d KB\n",
+             (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory, mTotalEffectsMemoryMaxUsed);
     write(fd, buffer, strlen(buffer));
 
     snprintf(buffer, SIZE, "Registered effects:\n");
diff --git a/services/audiopolicy/enginedefault/src/Gains.cpp b/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
similarity index 90%
rename from services/audiopolicy/enginedefault/src/Gains.cpp
rename to services/audiopolicy/common/managerdefinitions/src/Gains.cpp
index d06365c..e3fc9a8 100644
--- a/services/audiopolicy/enginedefault/src/Gains.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
@@ -114,7 +114,7 @@
 };
 
 const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT]
-                                                  [Volume::DEVICE_CATEGORY_CNT] = {
+                                                  [DEVICE_CATEGORY_CNT] = {
     { // AUDIO_STREAM_VOICE_CALL
         Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
         Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
@@ -197,17 +197,11 @@
 };
 
 //static
-float Gains::volIndexToDb(Volume::device_category deviceCategory,
-                          const StreamDescriptor& streamDesc,
-                          int indexInUi)
+float Gains::volIndexToDb(const VolumeCurvePoint *curve, int indexMin, int indexMax, int indexInUi)
 {
-    const VolumeCurvePoint *curve = streamDesc.getVolumeCurvePoint(deviceCategory);
-
     // the volume index in the UI is relative to the min and max volume indices for this stream type
-    int nbSteps = 1 + curve[Volume::VOLMAX].mIndex -
-            curve[Volume::VOLMIN].mIndex;
-    int volIdx = (nbSteps * (indexInUi - streamDesc.getVolumeIndexMin())) /
-            (streamDesc.getVolumeIndexMax() - streamDesc.getVolumeIndexMin());
+    int nbSteps = 1 + curve[Volume::VOLMAX].mIndex - curve[Volume::VOLMIN].mIndex;
+    int volIdx = (nbSteps * (indexInUi - indexMin)) / (indexMax - indexMin);
 
     // find what part of the curve this index volume belongs to, or if it's out of bounds
     int segment = 0;
@@ -241,15 +235,4 @@
     return decibels;
 }
 
-
-//static
-float Gains::volIndexToAmpl(Volume::device_category deviceCategory,
-                            const StreamDescriptor& streamDesc,
-                            int indexInUi)
-{
-    return Volume::DbToAmpl(volIndexToDb(deviceCategory, streamDesc, indexInUi));
-}
-
-
-
 }; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 7e2050b..a85c07f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -20,190 +20,83 @@
 #include "HwModule.h"
 #include "IOProfile.h"
 #include "AudioGain.h"
-#include "ConfigParsingUtils.h"
-#include "audio_policy_conf.h"
 #include <hardware/audio.h>
 #include <policy.h>
 
 namespace android {
 
-HwModule::HwModule(const char *name)
-    : mName(strndup(name, AUDIO_HARDWARE_MODULE_ID_MAX_LEN)),
-      mHalVersion(AUDIO_DEVICE_API_VERSION_MIN), mHandle(0)
+HwModule::HwModule(const char *name, uint32_t halVersion)
+    : mName(String8(name)),
+      mHandle(AUDIO_MODULE_HANDLE_NONE),
+      mHalVersion(halVersion)
 {
 }
 
 HwModule::~HwModule()
 {
     for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-        mOutputProfiles[i]->mSupportedDevices.clear();
+        mOutputProfiles[i]->clearSupportedDevices();
     }
     for (size_t i = 0; i < mInputProfiles.size(); i++) {
-        mInputProfiles[i]->mSupportedDevices.clear();
+        mInputProfiles[i]->clearSupportedDevices();
     }
-    free((void *)mName);
-}
-
-status_t HwModule::loadInput(cnode *root)
-{
-    cnode *node = root->first_child;
-
-    sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SINK);
-
-    while (node) {
-        if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) {
-            profile->loadSamplingRates((char *)node->value);
-        } else if (strcmp(node->name, FORMATS_TAG) == 0) {
-            profile->loadFormats((char *)node->value);
-        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
-            profile->loadInChannels((char *)node->value);
-        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
-            profile->mSupportedDevices.loadDevicesFromTag((char *)node->value,
-                                                           mDeclaredDevices);
-        } else if (strcmp(node->name, FLAGS_TAG) == 0) {
-            profile->mFlags = ConfigParsingUtils::parseInputFlagNames((char *)node->value);
-        } else if (strcmp(node->name, GAINS_TAG) == 0) {
-            profile->loadGains(node);
-        }
-        node = node->next;
-    }
-    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
-            "loadInput() invalid supported devices");
-    ALOGW_IF(profile->mChannelMasks.size() == 0,
-            "loadInput() invalid supported channel masks");
-    ALOGW_IF(profile->mSamplingRates.size() == 0,
-            "loadInput() invalid supported sampling rates");
-    ALOGW_IF(profile->mFormats.size() == 0,
-            "loadInput() invalid supported formats");
-    if (!profile->mSupportedDevices.isEmpty() &&
-            (profile->mChannelMasks.size() != 0) &&
-            (profile->mSamplingRates.size() != 0) &&
-            (profile->mFormats.size() != 0)) {
-
-        ALOGV("loadInput() adding input Supported Devices %04x",
-              profile->mSupportedDevices.types());
-
-        profile->attach(this);
-        mInputProfiles.add(profile);
-        return NO_ERROR;
-    } else {
-        return BAD_VALUE;
-    }
-}
-
-status_t HwModule::loadOutput(cnode *root)
-{
-    cnode *node = root->first_child;
-
-    sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SOURCE);
-
-    while (node) {
-        if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) {
-            profile->loadSamplingRates((char *)node->value);
-        } else if (strcmp(node->name, FORMATS_TAG) == 0) {
-            profile->loadFormats((char *)node->value);
-        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
-            profile->loadOutChannels((char *)node->value);
-        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
-            profile->mSupportedDevices.loadDevicesFromTag((char *)node->value,
-                                                           mDeclaredDevices);
-        } else if (strcmp(node->name, FLAGS_TAG) == 0) {
-            profile->mFlags = ConfigParsingUtils::parseOutputFlagNames((char *)node->value);
-        } else if (strcmp(node->name, GAINS_TAG) == 0) {
-            profile->loadGains(node);
-        }
-        node = node->next;
-    }
-    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
-            "loadOutput() invalid supported devices");
-    ALOGW_IF(profile->mChannelMasks.size() == 0,
-            "loadOutput() invalid supported channel masks");
-    ALOGW_IF(profile->mSamplingRates.size() == 0,
-            "loadOutput() invalid supported sampling rates");
-    ALOGW_IF(profile->mFormats.size() == 0,
-            "loadOutput() invalid supported formats");
-    if (!profile->mSupportedDevices.isEmpty() &&
-            (profile->mChannelMasks.size() != 0) &&
-            (profile->mSamplingRates.size() != 0) &&
-            (profile->mFormats.size() != 0)) {
-
-        ALOGV("loadOutput() adding output Supported Devices %04x, mFlags %04x",
-              profile->mSupportedDevices.types(), profile->mFlags);
-        profile->attach(this);
-        mOutputProfiles.add(profile);
-        return NO_ERROR;
-    } else {
-        return BAD_VALUE;
-    }
-}
-
-status_t HwModule::loadDevice(cnode *root)
-{
-    cnode *node = root->first_child;
-
-    audio_devices_t type = AUDIO_DEVICE_NONE;
-    while (node) {
-        if (strcmp(node->name, APM_DEVICE_TYPE) == 0) {
-            type = ConfigParsingUtils::parseDeviceNames((char *)node->value);
-            break;
-        }
-        node = node->next;
-    }
-    if (type == AUDIO_DEVICE_NONE ||
-            (!audio_is_input_device(type) && !audio_is_output_device(type))) {
-        ALOGW("loadDevice() bad type %08x", type);
-        return BAD_VALUE;
-    }
-    sp<DeviceDescriptor> deviceDesc = new DeviceDescriptor(type);
-    deviceDesc->mTag = String8(root->name);
-
-    node = root->first_child;
-    while (node) {
-        if (strcmp(node->name, APM_DEVICE_ADDRESS) == 0) {
-            deviceDesc->mAddress = String8((char *)node->value);
-        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
-            if (audio_is_input_device(type)) {
-                deviceDesc->loadInChannels((char *)node->value);
-            } else {
-                deviceDesc->loadOutChannels((char *)node->value);
-            }
-        } else if (strcmp(node->name, GAINS_TAG) == 0) {
-            deviceDesc->loadGains(node);
-        }
-        node = node->next;
-    }
-
-    ALOGV("loadDevice() adding device tag %s type %08x address %s",
-          deviceDesc->mTag.string(), type, deviceDesc->mAddress.string());
-
-    mDeclaredDevices.add(deviceDesc);
-
-    return NO_ERROR;
 }
 
 status_t HwModule::addOutputProfile(String8 name, const audio_config_t *config,
-                                                  audio_devices_t device, String8 address)
+                                    audio_devices_t device, String8 address)
 {
-    sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SOURCE);
+    sp<IOProfile> profile = new OutputProfile(name);
 
-    profile->mSamplingRates.add(config->sample_rate);
-    profile->mChannelMasks.add(config->channel_mask);
-    profile->mFormats.add(config->format);
+    profile->addAudioProfile(new AudioProfile(config->format, config->channel_mask,
+                                              config->sample_rate));
 
     sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
     devDesc->mAddress = address;
-    profile->mSupportedDevices.add(devDesc);
+    profile->addSupportedDevice(devDesc);
 
+    return addOutputProfile(profile);
+}
+
+status_t HwModule::addOutputProfile(const sp<IOProfile> &profile)
+{
     profile->attach(this);
     mOutputProfiles.add(profile);
-
+    mPorts.add(profile);
     return NO_ERROR;
 }
 
+status_t HwModule::addInputProfile(const sp<IOProfile> &profile)
+{
+    profile->attach(this);
+    mInputProfiles.add(profile);
+    mPorts.add(profile);
+    return NO_ERROR;
+}
+
+status_t HwModule::addProfile(const sp<IOProfile> &profile)
+{
+    switch (profile->getRole()) {
+    case AUDIO_PORT_ROLE_SOURCE:
+        return addOutputProfile(profile);
+    case AUDIO_PORT_ROLE_SINK:
+        return addInputProfile(profile);
+    case AUDIO_PORT_ROLE_NONE:
+        return BAD_VALUE;
+    }
+    return BAD_VALUE;
+}
+
+void HwModule::setProfiles(const IOProfileCollection &profiles)
+{
+    for (size_t i = 0; i < profiles.size(); i++) {
+        addProfile(profiles[i]);
+    }
+}
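For illustration, a minimal sketch of populating a module through the new role-dispatching addProfile() (the module/profile names are hypothetical, and OutputProfile/InputProfile are assumed to set AUDIO_PORT_ROLE_SOURCE/AUDIO_PORT_ROLE_SINK in their constructors, which is what the dispatch above relies on):

    sp<HwModule> module = new HwModule("primary", AUDIO_DEVICE_API_VERSION_MIN);
    sp<IOProfile> out = new OutputProfile(String8("primary output"));
    sp<IOProfile> in = new InputProfile(String8("primary input"));
    module->addProfile(out);  // AUDIO_PORT_ROLE_SOURCE -> addOutputProfile()
    module->addProfile(in);   // AUDIO_PORT_ROLE_SINK   -> addInputProfile()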
+
 status_t HwModule::removeOutputProfile(String8 name)
 {
     for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-        if (mOutputProfiles[i]->mName == name) {
+        if (mOutputProfiles[i]->getName() == name) {
             mOutputProfiles.removeAt(i);
             break;
         }
@@ -213,30 +106,26 @@
 }
 
 status_t HwModule::addInputProfile(String8 name, const audio_config_t *config,
-                                                  audio_devices_t device, String8 address)
+                                   audio_devices_t device, String8 address)
 {
-    sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SINK);
-
-    profile->mSamplingRates.add(config->sample_rate);
-    profile->mChannelMasks.add(config->channel_mask);
-    profile->mFormats.add(config->format);
+    sp<IOProfile> profile = new InputProfile(name);
+    profile->addAudioProfile(new AudioProfile(config->format, config->channel_mask,
+                                              config->sample_rate));
 
     sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
     devDesc->mAddress = address;
-    profile->mSupportedDevices.add(devDesc);
+    profile->addSupportedDevice(devDesc);
 
-    ALOGV("addInputProfile() name %s rate %d mask 0x08", name.string(), config->sample_rate, config->channel_mask);
+    ALOGV("addInputProfile() name %s rate %d mask 0x%08x",
+          name.string(), config->sample_rate, config->channel_mask);
 
-    profile->attach(this);
-    mInputProfiles.add(profile);
-
-    return NO_ERROR;
+    return addInputProfile(profile);
 }
 
 status_t HwModule::removeInputProfile(String8 name)
 {
     for (size_t i = 0; i < mInputProfiles.size(); i++) {
-        if (mInputProfiles[i]->mName == name) {
+        if (mInputProfiles[i]->getName() == name) {
             mInputProfiles.removeAt(i);
             break;
         }
@@ -245,6 +134,88 @@
     return NO_ERROR;
 }
 
+void HwModule::setDeclaredDevices(const DeviceVector &devices)
+{
+    mDeclaredDevices = devices;
+    for (size_t i = 0; i < devices.size(); i++) {
+        mPorts.add(devices[i]);
+    }
+}
+
+sp<DeviceDescriptor> HwModule::getRouteSinkDevice(const sp<AudioRoute> &route) const
+{
+    sp<DeviceDescriptor> sinkDevice = 0;
+    if (route->getSink()->getType() == AUDIO_PORT_TYPE_DEVICE) {
+        sinkDevice = mDeclaredDevices.getDeviceFromTagName(route->getSink()->getTagName());
+    }
+    return sinkDevice;
+}
+
+DeviceVector HwModule::getRouteSourceDevices(const sp<AudioRoute> &route) const
+{
+    DeviceVector sourceDevices;
+    Vector <sp<AudioPort> > sources = route->getSources();
+    for (size_t i = 0; i < sources.size(); i++) {
+        if (sources[i]->getType() == AUDIO_PORT_TYPE_DEVICE) {
+            sourceDevices.add(mDeclaredDevices.getDeviceFromTagName(sources[i]->getTagName()));
+        }
+    }
+    return sourceDevices;
+}
+
+void HwModule::setRoutes(const AudioRouteVector &routes)
+{
+    mRoutes = routes;
+    // Routes changed: refresh the supported devices of each stream (IOProfile)
+    refreshSupportedDevices();
+}
+
+void HwModule::refreshSupportedDevices()
+{
+    // Rebuild each stream's (IOProfile's) supported devices from its attached routes
+    for (size_t i = 0; i < mInputProfiles.size(); i++) {
+        sp<IOProfile> stream = mInputProfiles[i];
+        DeviceVector sourceDevices;
+        const AudioRouteVector &routes = stream->getRoutes();
+        for (size_t j = 0; j < routes.size(); j++) {
+            sp<AudioPort> sink = routes[j]->getSink();
+            if (sink == 0 || stream != sink) {
+                ALOGE("%s: Invalid route attached to input stream", __FUNCTION__);
+                continue;
+            }
+            DeviceVector sourceDevicesForRoute = getRouteSourceDevices(routes[j]);
+            if (sourceDevicesForRoute.isEmpty()) {
+                ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().string());
+                continue;
+            }
+            sourceDevices.add(sourceDevicesForRoute);
+        }
+        if (sourceDevices.isEmpty()) {
+            ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().string());
+            continue;
+        }
+        stream->setSupportedDevices(sourceDevices);
+    }
+    for (size_t i = 0; i < mOutputProfiles.size(); i++) {
+        sp<IOProfile> stream = mOutputProfiles[i];
+        DeviceVector sinkDevices;
+        const AudioRouteVector &routes = stream->getRoutes();
+        for (size_t j = 0; j < routes.size(); j++) {
+            sp<AudioPort> source = routes[j]->getSources().findByTagName(stream->getTagName());
+            if (source == 0 || stream != source) {
+                ALOGE("%s: Invalid route attached to output stream", __FUNCTION__);
+                continue;
+            }
+            sp<DeviceDescriptor> sinkDevice = getRouteSinkDevice(routes[j]);
+            if (sinkDevice == 0) {
+                ALOGE("%s: invalid sink device for %s", __FUNCTION__, stream->getName().string());
+                continue;
+            }
+            sinkDevices.add(sinkDevice);
+        }
+        stream->setSupportedDevices(sinkDevices);
+    }
+}
 
 void HwModule::dump(int fd)
 {
@@ -252,7 +223,7 @@
     char buffer[SIZE];
     String8 result;
 
-    snprintf(buffer, SIZE, "  - name: %s\n", mName);
+    snprintf(buffer, SIZE, "  - name: %s\n", getName());
     result.append(buffer);
     snprintf(buffer, SIZE, "  - handle: %d\n", mHandle);
     result.append(buffer);
@@ -275,12 +246,8 @@
             mInputProfiles[i]->dump(fd);
         }
     }
-    if (mDeclaredDevices.size()) {
-        write(fd, "  - devices:\n", strlen("  - devices:\n"));
-        for (size_t i = 0; i < mDeclaredDevices.size(); i++) {
-            mDeclaredDevices[i]->dump(fd, 4, i);
-        }
-    }
+    mDeclaredDevices.dump(fd, String8("Declared"), 2, true);
+    mRoutes.dump(fd, 2);
 }
 
 sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
@@ -289,7 +256,7 @@
 
     for (size_t i = 0; i < size(); i++)
     {
-        if (strcmp(itemAt(i)->mName, name) == 0) {
+        if (strcmp(itemAt(i)->getName(), name) == 0) {
             return itemAt(i);
         }
     }
@@ -302,20 +269,19 @@
     sp <HwModule> module;
 
     for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->mHandle == 0) {
+        if (itemAt(i)->getHandle() == 0) {
             continue;
         }
         if (audio_is_output_device(device)) {
             for (size_t j = 0; j < itemAt(i)->mOutputProfiles.size(); j++)
             {
-                if (itemAt(i)->mOutputProfiles[j]->mSupportedDevices.types() & device) {
+                if (itemAt(i)->mOutputProfiles[j]->supportDevice(device)) {
                     return itemAt(i);
                 }
             }
         } else {
             for (size_t j = 0; j < itemAt(i)->mInputProfiles.size(); j++) {
-                if (itemAt(i)->mInputProfiles[j]->mSupportedDevices.types() &
-                        device & ~AUDIO_DEVICE_BIT_IN) {
+                if (itemAt(i)->mInputProfiles[j]->supportDevice(device)) {
                     return itemAt(i);
                 }
             }
@@ -326,7 +292,8 @@
 
 sp<DeviceDescriptor>  HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
                                                               const char *device_address,
-                                                              const char *device_name) const
+                                                              const char *device_name,
+                                                              bool matchAddress) const
 {
     String8 address = (device_address == NULL) ? String8("") : String8(device_address);
     // handle legacy remote submix case where the address was not always specified
@@ -339,20 +306,21 @@
         if (hwModule->mHandle == 0) {
             continue;
         }
-        DeviceVector deviceList =
-                hwModule->mDeclaredDevices.getDevicesFromTypeAddr(device, address);
+        DeviceVector declaredDevices = hwModule->getDeclaredDevices();
+        DeviceVector deviceList = declaredDevices.getDevicesFromTypeAddr(device, address);
         if (!deviceList.isEmpty()) {
             return deviceList.itemAt(0);
         }
-        deviceList = hwModule->mDeclaredDevices.getDevicesFromType(device);
-        if (!deviceList.isEmpty()) {
-            return deviceList.itemAt(0);
+        if (!matchAddress) {
+            deviceList = declaredDevices.getDevicesFromType(device);
+            if (!deviceList.isEmpty()) {
+                return deviceList.itemAt(0);
+            }
         }
     }
 
-    sp<DeviceDescriptor> devDesc =
-            new DeviceDescriptor(device);
-    devDesc->mName = device_name;
+    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
+    devDesc->setName(String8(device_name));
     devDesc->mAddress = address;
     return devDesc;
 }
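A hedged sketch of the new matchAddress semantics (the device, address, and name values are illustrative): with matchAddress true, the lookup only returns a declared device on an exact type-and-address hit; with it false, the code above may fall back to any declared device of the same type before synthesizing a fresh descriptor.

    // modules is a HwModuleCollection built from the parsed configuration.
    sp<DeviceDescriptor> dev = modules.getDeviceDescriptor(
            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "0" /* legacy submix address */,
            "remote_submix", true /* matchAddress */);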
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 7b6d51d..abf2dd4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -20,18 +20,10 @@
 #include "IOProfile.h"
 #include "HwModule.h"
 #include "AudioGain.h"
+#include "TypeConverter.h"
 
 namespace android {
 
-IOProfile::IOProfile(const String8& name, audio_port_role_t role)
-    : AudioPort(name, AUDIO_PORT_TYPE_MIX, role)
-{
-}
-
-IOProfile::~IOProfile()
-{
-}
-
 // checks if the IO profile is compatible with specified parameters.
 // Sampling rate, format and channel mask must be specified in order to
 // get a valid a match
@@ -45,8 +37,10 @@
                                     audio_channel_mask_t *updatedChannelMask,
                                     uint32_t flags) const
 {
-    const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE;
-    const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
+    const bool isPlaybackThread =
+            getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SOURCE;
+    const bool isRecordThread =
+            getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SINK;
     ALOG_ASSERT(isPlaybackThread != isRecordThread);
 
 
@@ -61,47 +55,35 @@
         }
     }
 
-    if (samplingRate == 0) {
-         return false;
-    }
-    uint32_t myUpdatedSamplingRate = samplingRate;
-    if (isPlaybackThread && checkExactSamplingRate(samplingRate) != NO_ERROR) {
-         return false;
-    }
-    if (isRecordThread && checkCompatibleSamplingRate(samplingRate, &myUpdatedSamplingRate) !=
-            NO_ERROR) {
+    if (!audio_is_valid_format(format) ||
+            (isPlaybackThread && (samplingRate == 0 || !audio_is_output_channel(channelMask))) ||
+            (isRecordThread && (!audio_is_input_channel(channelMask)))) {
          return false;
     }
 
-    if (!audio_is_valid_format(format)) {
-        return false;
-    }
-    if (isPlaybackThread && checkExactFormat(format) != NO_ERROR) {
-        return false;
-    }
     audio_format_t myUpdatedFormat = format;
-    if (isRecordThread && checkCompatibleFormat(format, &myUpdatedFormat) != NO_ERROR) {
-        return false;
-    }
-
-    if (isPlaybackThread && (!audio_is_output_channel(channelMask) ||
-            checkExactChannelMask(channelMask) != NO_ERROR)) {
-        return false;
-    }
     audio_channel_mask_t myUpdatedChannelMask = channelMask;
-    if (isRecordThread && (!audio_is_input_channel(channelMask) ||
-            checkCompatibleChannelMask(channelMask, &myUpdatedChannelMask) != NO_ERROR)) {
-        return false;
+    uint32_t myUpdatedSamplingRate = samplingRate;
+    if (isRecordThread) {
+        // Record path: accept the closest compatible profile (may adjust the myUpdated* values).
+        if (checkCompatibleAudioProfile(
+                myUpdatedSamplingRate, myUpdatedChannelMask, myUpdatedFormat) != NO_ERROR) {
+            return false;
+        }
+    } else {
+        if (checkExactAudioProfile(samplingRate, channelMask, format) != NO_ERROR) {
+            return false;
+        }
     }
 
-    if (isPlaybackThread && (mFlags & flags) != flags) {
+    if (isPlaybackThread && (getFlags() & flags) != flags) {
         return false;
     }
     // The only input flag that is allowed to be different is the fast flag.
     // An existing fast stream is compatible with a normal track request.
     // An existing normal stream is compatible with a fast track request,
     // but the fast request will be denied by AudioFlinger and converted to normal track.
-    if (isRecordThread && ((mFlags ^ flags) &
+    if (isRecordThread && ((getFlags() ^ flags) &
             ~AUDIO_INPUT_FLAG_FAST)) {
         return false;
     }
@@ -126,39 +108,15 @@
 
     AudioPort::dump(fd, 4);
 
-    snprintf(buffer, SIZE, "    - flags: 0x%04x\n", mFlags);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "    - devices:\n");
+    snprintf(buffer, SIZE, "    - flags: 0x%04x\n", getFlags());
     result.append(buffer);
     write(fd, result.string(), result.size());
-    for (size_t i = 0; i < mSupportedDevices.size(); i++) {
-        mSupportedDevices[i]->dump(fd, 6, i);
-    }
+    mSupportedDevices.dump(fd, String8("Supported"), 4, false);
 }
 
 void IOProfile::log()
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    ALOGV("    - sampling rates: ");
-    for (size_t i = 0; i < mSamplingRates.size(); i++) {
-        ALOGV("  %d", mSamplingRates[i]);
-    }
-
-    ALOGV("    - channel masks: ");
-    for (size_t i = 0; i < mChannelMasks.size(); i++) {
-        ALOGV("  0x%04x", mChannelMasks[i]);
-    }
-
-    ALOGV("    - formats: ");
-    for (size_t i = 0; i < mFormats.size(); i++) {
-        ALOGV("  0x%08x", mFormats[i]);
-    }
-
-    ALOGV("    - devices: 0x%04x\n", mSupportedDevices.types());
-    ALOGV("    - flags: 0x%04x\n", mFlags);
+    // @TODO: forward log to AudioPort
 }
 
 }; // namespace android
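The refactor above folds the per-field rate/format/channel checks into two profile-level helpers whose signatures are not shown in this hunk; the intent, sketched under that assumption, is that playback requires an exact match while capture settles for the nearest supported value:

    #include <cstdint>
    #include <vector>

    // Standalone sketch of the matching policy, not the AOSP helpers.
    static bool exactRate(const std::vector<uint32_t> &rates, uint32_t r) {
        for (uint32_t s : rates) {
            if (s == r) return true;   // playback: exact match only
        }
        return false;
    }
    static uint32_t compatibleRate(const std::vector<uint32_t> &rates, uint32_t r) {
        // Capture: nearest supported rate, reported back through the
        // updated* out-parameters; assumes a non-empty rate list.
        auto diff = [](uint32_t a, uint32_t b) { return a > b ? a - b : b - a; };
        uint32_t best = rates.front();
        for (uint32_t s : rates) {
            if (diff(s, r) < diff(best, r)) best = s;
        }
        return best;
    }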
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
new file mode 100644
index 0000000..3e5bb7d
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -0,0 +1,644 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::Serializer"
+//#define LOG_NDEBUG 0
+
+#include "Serializer.h"
+#include <convert/convert.h>
+#include "TypeConverter.h"
+#include <libxml/parser.h>
+#include <libxml/xinclude.h>
+#include <string>
+#include <sstream>
+#include <istream>
+
+using std::string;
+
+namespace android {
+
+string getXmlAttribute(const xmlNode *cur, const char *attribute)
+{
+    xmlChar *xmlValue = xmlGetProp(cur, (const xmlChar*)attribute);
+    if (xmlValue == NULL) {
+        return "";
+    }
+    string value((const char*)xmlValue);
+    xmlFree(xmlValue);
+    return value;
+}
+
+using utilities::convertTo;
+
+const char *const PolicySerializer::rootName = "audioPolicyConfiguration";
+const char *const PolicySerializer::versionAttribute = "version";
+const uint32_t PolicySerializer::gMajor = 1;
+const uint32_t PolicySerializer::gMinor = 0;
+static const char *const gReferenceElementName = "reference";
+static const char *const gReferenceAttributeName = "name";
+
+template <class Trait>
+static void getReference(const _xmlNode *root, const _xmlNode *&refNode, const string &refName)
+{
+    const _xmlNode *col = root;
+    while (col != NULL) {
+        if (!xmlStrcmp(col->name, (const xmlChar *)Trait::collectionTag)) {
+            const xmlNode *cur = col->children;
+            while (cur != NULL) {
+                if ((!xmlStrcmp(cur->name, (const xmlChar *)gReferenceElementName))) {
+                    string name = getXmlAttribute(cur, gReferenceAttributeName);
+                    if (refName == name) {
+                        refNode = cur;
+                        return;
+                    }
+                }
+                cur = cur->next;
+            }
+        }
+        col = col->next;
+    }
+    return;
+}
+
+template <class Trait>
+static status_t deserializeCollection(_xmlDoc *doc, const _xmlNode *cur,
+                                      typename Trait::Collection &collection,
+                                      typename Trait::PtrSerializingCtx serializingContext)
+{
+    const xmlNode *root = cur->xmlChildrenNode;
+    while (root != NULL) {
+        if (xmlStrcmp(root->name, (const xmlChar *)Trait::collectionTag) &&
+                xmlStrcmp(root->name, (const xmlChar *)Trait::tag)) {
+            root = root->next;
+            continue;
+        }
+        const xmlNode *child = root;
+        if (!xmlStrcmp(child->name, (const xmlChar *)Trait::collectionTag)) {
+            child = child->xmlChildrenNode;
+        }
+        while (child != NULL) {
+            if (!xmlStrcmp(child->name, (const xmlChar *)Trait::tag)) {
+                typename Trait::PtrElement element;
+                status_t status = Trait::deserialize(doc, child, element, serializingContext);
+                if (status != NO_ERROR) {
+                    return status;
+                }
+                if (collection.add(element) < 0) {
+                    ALOGE("%s: could not add element to %s collection", __FUNCTION__,
+                          Trait::collectionTag);
+                }
+            }
+            child = child->next;
+        }
+        if (!xmlStrcmp(root->name, (const xmlChar *)Trait::tag)) {
+            return NO_ERROR;
+        }
+        root = root->next;
+    }
+    return NO_ERROR;
+}
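deserializeCollection<> is driven purely by a Traits type; a hypothetical minimal Traits, inferred from the call sites in this file (Widget is an invented element type, not part of the patch), would provide:

    class Widget : public RefBase {};
    struct WidgetTraits {
        static const char *const tag;            // XML element name, e.g. "widget"
        static const char *const collectionTag;  // XML wrapper name, e.g. "widgets"
        typedef sp<Widget> PtrElement;
        typedef Vector<PtrElement> Collection;   // must expose add(element)
        typedef void *PtrSerializingCtx;         // opaque context handed to deserialize()
        static status_t deserialize(_xmlDoc *doc, const _xmlNode *node,
                                    PtrElement &element, PtrSerializingCtx ctx);
    };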
+
+const char *const AudioGainTraits::tag = "gain";
+const char *const AudioGainTraits::collectionTag = "gains";
+
+const char AudioGainTraits::Attributes::mode[] = "mode";
+const char AudioGainTraits::Attributes::channelMask[] = "channel_mask";
+const char AudioGainTraits::Attributes::minValueMB[] = "minValueMB";
+const char AudioGainTraits::Attributes::maxValueMB[] = "maxValueMB";
+const char AudioGainTraits::Attributes::defaultValueMB[] = "defaultValueMB";
+const char AudioGainTraits::Attributes::stepValueMB[] = "stepValueMB";
+const char AudioGainTraits::Attributes::minRampMs[] = "minRampMs";
+const char AudioGainTraits::Attributes::maxRampMs[] = "maxRampMs";
+
+status_t AudioGainTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *root, PtrElement &gain,
+                                      PtrSerializingCtx /*serializingContext*/)
+{
+    static uint32_t index = 0;
+    gain = new Element(index++, true);
+
+    string mode = getXmlAttribute(root, Attributes::mode);
+    if (!mode.empty()) {
+        gain->setMode(GainModeConverter::maskFromString(mode));
+    }
+
+    string channelsLiteral = getXmlAttribute(root, Attributes::channelMask);
+    if (!channelsLiteral.empty()) {
+        gain->setChannelMask(channelMaskFromString(channelsLiteral));
+    }
+
+    string minValueMBLiteral = getXmlAttribute(root, Attributes::minValueMB);
+    uint32_t minValueMB;
+    if (!minValueMBLiteral.empty() && convertTo(minValueMBLiteral, minValueMB)) {
+        gain->setMinValueInMb(minValueMB);
+    }
+
+    string maxValueMBLiteral = getXmlAttribute(root, Attributes::maxValueMB);
+    uint32_t maxValueMB;
+    if (!maxValueMBLiteral.empty() && convertTo(maxValueMBLiteral, maxValueMB)) {
+        gain->setMaxValueInMb(maxValueMB);
+    }
+
+    string defaultValueMBLiteral = getXmlAttribute(root, Attributes::defaultValueMB);
+    uint32_t defaultValueMB;
+    if (!defaultValueMBLiteral.empty() && convertTo(defaultValueMBLiteral, defaultValueMB)) {
+        gain->setDefaultValueInMb(defaultValueMB);
+    }
+
+    string stepValueMBLiteral = getXmlAttribute(root, Attributes::stepValueMB);
+    uint32_t stepValueMB;
+    if (!stepValueMBLiteral.empty() && convertTo(stepValueMBLiteral, stepValueMB)) {
+        gain->setStepValueInMb(stepValueMB);
+    }
+
+    string minRampMsLiteral = getXmlAttribute(root, Attributes::minRampMs);
+    uint32_t minRampMs;
+    if (!minRampMsLiteral.empty() && convertTo(minRampMsLiteral, minRampMs)) {
+        gain->setMinRampInMs(minRampMs);
+    }
+
+    string maxRampMsLiteral = getXmlAttribute(root, Attributes::maxRampMs);
+    uint32_t maxRampMs;
+    if (!maxRampMsLiteral.empty() && convertTo(maxRampMsLiteral, maxRampMs)) {
+        gain->setMaxRampInMs(maxRampMs);
+    }
+    ALOGV("%s: adding new gain mode %08x channel mask %08x min mB %d max mB %d", __FUNCTION__,
+          gain->getMode(), gain->getChannelMask(), gain->getMinValueInMb(),
+          gain->getMaxValueInMb());
+
+    if (gain->getMode() == 0) {
+        return BAD_VALUE;
+    }
+    return NO_ERROR;
+}
+
+const char *const AudioProfileTraits::collectionTag = "profiles";
+const char *const AudioProfileTraits::tag = "profile";
+
+const char AudioProfileTraits::Attributes::name[] = "name";
+const char AudioProfileTraits::Attributes::samplingRates[] = "samplingRates";
+const char AudioProfileTraits::Attributes::format[] = "format";
+const char AudioProfileTraits::Attributes::channelMasks[] = "channelMasks";
+
+status_t AudioProfileTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *root, PtrElement &profile,
+                                         PtrSerializingCtx /*serializingContext*/)
+{
+    string samplingRates = getXmlAttribute(root, Attributes::samplingRates);
+    string format = getXmlAttribute(root, Attributes::format);
+    string channels = getXmlAttribute(root, Attributes::channelMasks);
+
+    profile = new Element(formatFromString(format), channelMasksFromString(channels, ","),
+                          samplingRatesFromString(samplingRates, ","));
+
+    profile->setDynamicFormat(profile->getFormat() == gDynamicFormat);
+    profile->setDynamicChannels(profile->getChannels().isEmpty());
+    profile->setDynamicRate(profile->getSampleRates().isEmpty());
+
+    return NO_ERROR;
+}
+
+
+const char *const MixPortTraits::collectionTag = "mixPorts";
+const char *const MixPortTraits::tag = "mixPort";
+
+const char MixPortTraits::Attributes::name[] = "name";
+const char MixPortTraits::Attributes::role[] = "role";
+const char MixPortTraits::Attributes::flags[] = "flags";
+
+status_t MixPortTraits::deserialize(_xmlDoc *doc, const _xmlNode *child, PtrElement &mixPort,
+                                    PtrSerializingCtx /*serializingContext*/)
+{
+    string name = getXmlAttribute(child, Attributes::name);
+    if (name.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::name);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s %s=%s", __FUNCTION__, tag, Attributes::name, name.c_str());
+    string role = getXmlAttribute(child, Attributes::role);
+    if (role.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::role);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: Role=%s", __FUNCTION__, role.c_str());
+    audio_port_role_t portRole = role == "source" ? AUDIO_PORT_ROLE_SOURCE : AUDIO_PORT_ROLE_SINK;
+
+    mixPort = new Element(String8(name.c_str()), portRole);
+
+    AudioProfileTraits::Collection profiles;
+    deserializeCollection<AudioProfileTraits>(doc, child, profiles, NULL);
+    if (profiles.isEmpty()) {
+        sp <AudioProfile> dynamicProfile = new AudioProfile(gDynamicFormat,
+                                                            ChannelsVector(), SampleRateVector());
+        dynamicProfile->setDynamicFormat(true);
+        dynamicProfile->setDynamicChannels(true);
+        dynamicProfile->setDynamicRate(true);
+        profiles.add(dynamicProfile);
+    }
+    mixPort->setAudioProfiles(profiles);
+
+    string flags = getXmlAttribute(child, Attributes::flags);
+    if (!flags.empty()) {
+        // Source role
+        if (portRole == AUDIO_PORT_ROLE_SOURCE) {
+            mixPort->setFlags(OutputFlagConverter::maskFromString(flags));
+        } else {
+            // Sink role
+            mixPort->setFlags(InputFlagConverter::maskFromString(flags));
+        }
+    }
+    // Deserialize children
+    AudioGainTraits::Collection gains;
+    deserializeCollection<AudioGainTraits>(doc, child, gains, NULL);
+    mixPort->setGains(gains);
+
+    return NO_ERROR;
+}
+
+const char *const DevicePortTraits::tag = "devicePort";
+const char *const DevicePortTraits::collectionTag = "devicePorts";
+
+const char DevicePortTraits::Attributes::tagName[] = "tagName";
+const char DevicePortTraits::Attributes::type[] = "type";
+const char DevicePortTraits::Attributes::role[] = "role";
+const char DevicePortTraits::Attributes::address[] = "address";
+const char DevicePortTraits::Attributes::roleSource[] = "source";
+
+status_t DevicePortTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &deviceDesc,
+                                       PtrSerializingCtx /*serializingContext*/)
+{
+    string name = getXmlAttribute(root, Attributes::tagName);
+    if (name.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::tagName);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s %s=%s", __FUNCTION__, tag, Attributes::tagName, name.c_str());
+    string typeName = getXmlAttribute(root, Attributes::type);
+    if (typeName.empty()) {
+        ALOGE("%s: no type for %s", __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s %s=%s", __FUNCTION__, tag, Attributes::type, typeName.c_str());
+    string role = getXmlAttribute(root, Attributes::role);
+    if (role.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::role);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s %s=%s", __FUNCTION__, tag, Attributes::role, role.c_str());
+    audio_port_role_t portRole = (role == Attributes::roleSource) ?
+                AUDIO_PORT_ROLE_SOURCE : AUDIO_PORT_ROLE_SINK;
+
+    audio_devices_t type = AUDIO_DEVICE_NONE;
+    if (!DeviceConverter::fromString(typeName, type) ||
+            (!audio_is_input_device(type) && portRole == AUDIO_PORT_ROLE_SOURCE) ||
+            (!audio_is_output_devices(type) && portRole == AUDIO_PORT_ROLE_SINK)) {
+        ALOGW("%s: bad type %08x", __FUNCTION__, type);
+        return BAD_VALUE;
+    }
+    deviceDesc = new Element(type, String8(name.c_str()));
+
+    string address = getXmlAttribute(root, Attributes::address);
+    if (!address.empty()) {
+        ALOGV("%s: address=%s for %s", __FUNCTION__, address.c_str(), name.c_str());
+        deviceDesc->mAddress = String8(address.c_str());
+    }
+
+    AudioProfileTraits::Collection profiles;
+    deserializeCollection<AudioProfileTraits>(doc, root, profiles, NULL);
+    if (profiles.isEmpty()) {
+        sp <AudioProfile> dynamicProfile = new AudioProfile(gDynamicFormat,
+                                                            ChannelsVector(), SampleRateVector());
+        dynamicProfile->setDynamicFormat(true);
+        dynamicProfile->setDynamicChannels(true);
+        dynamicProfile->setDynamicRate(true);
+        profiles.add(dynamicProfile);
+    }
+    deviceDesc->setAudioProfiles(profiles);
+
+    // Deserialize AudioGain children
+    deserializeCollection<AudioGainTraits>(doc, root, deviceDesc->mGains, NULL);
+    ALOGV("%s: adding device tag %s type %08x address %s", __FUNCTION__,
+          deviceDesc->getName().string(), type, deviceDesc->mAddress.string());
+    return NO_ERROR;
+}
+
+const char *const RouteTraits::tag = "route";
+const char *const RouteTraits::collectionTag = "routes";
+
+const char RouteTraits::Attributes::type[] = "type";
+const char RouteTraits::Attributes::typeMix[] = "mix";
+const char RouteTraits::Attributes::sink[] = "sink";
+const char RouteTraits::Attributes::sources[] = "sources";
+
+
+status_t RouteTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *root, PtrElement &element,
+                                  PtrSerializingCtx ctx)
+{
+    string type = getXmlAttribute(root, Attributes::type);
+    if (type.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::type);
+        return BAD_VALUE;
+    }
+    audio_route_type_t routeType = (type == Attributes::typeMix) ?
+                AUDIO_ROUTE_MIX : AUDIO_ROUTE_MUX;
+
+    ALOGV("%s: %s %s=%s", __FUNCTION__, tag, Attributes::type, type.c_str());
+    element = new Element(routeType);
+
+    string sinkAttr = getXmlAttribute(root, Attributes::sink);
+    if (sinkAttr.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::sink);
+        return BAD_VALUE;
+    }
+    // Convert Sink name to port pointer
+    sp<AudioPort> sink = ctx->findPortByTagName(String8(sinkAttr.c_str()));
+    if (sink == NULL) {
+        ALOGE("%s: no sink found with name=%s", __FUNCTION__, sinkAttr.c_str());
+        return BAD_VALUE;
+    }
+    element->setSink(sink);
+
+    string sourcesAttr = getXmlAttribute(root, Attributes::sources);
+    if (sourcesAttr.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::sources);
+        return BAD_VALUE;
+    }
+    // Tokenize and Convert Sources name to port pointer
+    AudioPortVector sources;
+    char *sourcesLiteral = strdup(sourcesAttr.c_str());
+    char *devTag = strtok(sourcesLiteral, ",");
+    while (devTag != NULL) {
+        if (strlen(devTag) != 0) {
+            sp<AudioPort> source = ctx->findPortByTagName(String8(devTag));
+            if (source == NULL) {
+                ALOGE("%s: no source found with name=%s", __FUNCTION__, devTag);
+                free(sourcesLiteral);
+                return BAD_VALUE;
+            }
+            sources.add(source);
+        }
+        devTag = strtok(NULL, ",");
+    }
+    free(sourcesLiteral);
+
+    sink->addRoute(element);
+    for (size_t i = 0; i < sources.size(); i++) {
+        sp<AudioPort> source = sources.itemAt(i);
+        source->addRoute(element);
+    }
+    element->setSources(sources);
+    return NO_ERROR;
+}
+
+const char *const ModuleTraits::childAttachedDevicesTag = "attachedDevices";
+const char *const ModuleTraits::childAttachedDeviceTag = "item";
+const char *const ModuleTraits::childDefaultOutputDeviceTag = "defaultOutputDevice";
+
+const char *const ModuleTraits::tag = "module";
+const char *const ModuleTraits::collectionTag = "modules";
+
+const char ModuleTraits::Attributes::name[] = "name";
+const char ModuleTraits::Attributes::version[] = "halVersion";
+
+status_t ModuleTraits::deserialize(xmlDocPtr doc, const xmlNode *root, PtrElement &module,
+                                   PtrSerializingCtx ctx)
+{
+    string name = getXmlAttribute(root, Attributes::name);
+    if (name.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::name);
+        return BAD_VALUE;
+    }
+    uint32_t version = AUDIO_DEVICE_API_VERSION_MIN;
+    string versionLiteral = getXmlAttribute(root, Attributes::version);
+    if (!versionLiteral.empty()) {
+        uint32_t major, minor;
+        sscanf(versionLiteral.c_str(), "%u.%u", &major, &minor);
+        version = HARDWARE_DEVICE_API_VERSION(major, minor);
+        ALOGV("%s: mHalVersion = %04x major %u minor %u",  __FUNCTION__,
+              version, major, minor);
+    }
+
+    ALOGV("%s: %s %s=%s", __FUNCTION__, tag, Attributes::name, name.c_str());
+
+    module = new Element(name.c_str(), version);
+
+    // Deserialize children: Audio Mix Port, Audio Device Ports (Source/Sink), Audio Routes
+    MixPortTraits::Collection mixPorts;
+    deserializeCollection<MixPortTraits>(doc, root, mixPorts, NULL);
+    module->setProfiles(mixPorts);
+
+    DevicePortTraits::Collection devicePorts;
+    deserializeCollection<DevicePortTraits>(doc, root, devicePorts, NULL);
+    module->setDeclaredDevices(devicePorts);
+
+    RouteTraits::Collection routes;
+    deserializeCollection<RouteTraits>(doc, root, routes, module.get());
+    module->setRoutes(routes);
+
+    const xmlNode *children = root->xmlChildrenNode;
+    while (children != NULL) {
+        if (!xmlStrcmp(children->name, (const xmlChar *)childAttachedDevicesTag)) {
+            ALOGV("%s: %s %s found", __FUNCTION__, tag, childAttachedDevicesTag);
+            const xmlNode *child = children->xmlChildrenNode;
+            while (child != NULL) {
+                if (!xmlStrcmp(child->name, (const xmlChar *)childAttachedDeviceTag)) {
+                    xmlChar *attachedDevice = xmlNodeListGetString(doc, child->xmlChildrenNode, 1);
+                    if (attachedDevice != NULL) {
+                        ALOGV("%s: %s %s=%s", __FUNCTION__, tag, childAttachedDeviceTag,
+                              (const char*)attachedDevice);
+                        sp<DeviceDescriptor> device =
+                                module->getDeclaredDevices().getDeviceFromTagName(String8((const char*)attachedDevice));
+                        ctx->addAvailableDevice(device);
+                        xmlFree(attachedDevice);
+                    }
+                }
+                child = child->next;
+            }
+        }
+        if (!xmlStrcmp(children->name, (const xmlChar *)childDefaultOutputDeviceTag)) {
+            xmlChar *defaultOutputDevice = xmlNodeListGetString(doc, children->xmlChildrenNode, 1);
+            if (defaultOutputDevice != NULL) {
+                ALOGV("%s: %s %s=%s", __FUNCTION__, tag, childDefaultOutputDeviceTag,
+                      (const char*)defaultOutputDevice);
+                sp<DeviceDescriptor> device =
+                        module->getDeclaredDevices().getDeviceFromTagName(String8((const char*)defaultOutputDevice));
+                if (device != 0 && ctx->getDefaultOutputDevice() == 0) {
+                    ctx->setDefaultOutputDevice(device);
+                    ALOGV("%s: default is %08x", __FUNCTION__, ctx->getDefaultOutputDevice()->type());
+                }
+                xmlFree(defaultOutputDevice);
+            }
+        }
+        children = children->next;
+    }
+    return NO_ERROR;
+}
+
+const char *const GlobalConfigTraits::tag = "globalConfiguration";
+
+const char GlobalConfigTraits::Attributes::speakerDrcEnabled[] = "speaker_drc_enabled";
+
+
+status_t GlobalConfigTraits::deserialize(const xmlNode *cur, AudioPolicyConfig &config)
+{
+    const xmlNode *root = cur->xmlChildrenNode;
+    while (root != NULL) {
+        if (!xmlStrcmp(root->name, (const xmlChar *)tag)) {
+            string speakerDrcEnabled =
+                    getXmlAttribute(root, Attributes::speakerDrcEnabled);
+            bool isSpeakerDrcEnabled;
+            if (!speakerDrcEnabled.empty() &&
+                    convertTo<string, bool>(speakerDrcEnabled, isSpeakerDrcEnabled)) {
+                config.setSpeakerDrcEnabled(isSpeakerDrcEnabled);
+            }
+            return NO_ERROR;
+        }
+        root = root->next;
+    }
+    return NO_ERROR;
+}
+
+
+const char *const VolumeTraits::tag = "volume";
+const char *const VolumeTraits::collectionTag = "volumes";
+const char *const VolumeTraits::volumePointTag = "point";
+
+const char VolumeTraits::Attributes::stream[] = "stream";
+const char VolumeTraits::Attributes::deviceCategory[] = "deviceCategory";
+const char VolumeTraits::Attributes::reference[] = "ref";
+
+status_t VolumeTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
+                                   PtrSerializingCtx /*serializingContext*/)
+{
+    string streamTypeLiteral = getXmlAttribute(root, Attributes::stream);
+    if (streamTypeLiteral.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::stream);
+        return BAD_VALUE;
+    }
+    audio_stream_type_t streamType;
+    if (!StreamTypeConverter::fromString(streamTypeLiteral, streamType)) {
+        ALOGE("%s: Invalid %s", __FUNCTION__, Attributes::stream);
+        return BAD_VALUE;
+    }
+    string deviceCategoryLiteral = getXmlAttribute(root, Attributes::deviceCategory);
+    if (deviceCategoryLiteral.empty()) {
+        ALOGE("%s: No %s found", __FUNCTION__, Attributes::deviceCategory);
+        return BAD_VALUE;
+    }
+    device_category deviceCategory;
+    if (!DeviceCategoryConverter::fromString(deviceCategoryLiteral, deviceCategory)) {
+        ALOGE("%s: Invalid %s=%s", __FUNCTION__, Attributes::deviceCategory,
+              deviceCategoryLiteral.c_str());
+        return BAD_VALUE;
+    }
+
+    string referenceName = getXmlAttribute(root, Attributes::reference);
+    const _xmlNode *ref = NULL;
+    if (!referenceName.empty()) {
+        getReference<VolumeTraits>(root->parent, ref, referenceName);
+        if (ref == NULL) {
+            ALOGE("%s: No reference Ptr found for %s", __FUNCTION__, referenceName.c_str());
+            return BAD_VALUE;
+        }
+    }
+
+    element = new Element(deviceCategory, streamType);
+
+    const xmlNode *child = referenceName.empty() ? root->xmlChildrenNode : ref->xmlChildrenNode;
+    while (child != NULL) {
+        if (!xmlStrcmp(child->name, (const xmlChar *)volumePointTag)) {
+            xmlChar *pointDefinition = xmlNodeListGetString(doc, child->xmlChildrenNode, 1);
+            if (pointDefinition == NULL) {
+                return BAD_VALUE;
+            }
+            ALOGV("%s: %s=%s", __FUNCTION__, tag, (const char*)pointDefinition);
+            Vector<int32_t> point;
+            collectionFromString<DefaultTraits<int32_t> >((const char*)pointDefinition, point, ",");
+            if (point.size() != 2) {
+                ALOGE("%s: Invalid %s: %s", __FUNCTION__, volumePointTag,
+                      (const char*)pointDefinition);
+                return BAD_VALUE;
+            }
+            element->add(CurvePoint(point[0], point[1]));
+            xmlFree(pointDefinition);
+        }
+        child = child->next;
+    }
+    return NO_ERROR;
+}
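For reference, the element shape this parser consumes, reconstructed from the tags and attributes above (the curve points and the reference name are illustrative, not taken from a shipping config):

    <volumes>
        <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER">
            <point>0,-4950</point>
            <point>100,0</point>
        </volume>
        <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_SPEAKER"
                ref="DEFAULT_SPEAKER_VOLUME_CURVE"/>
    </volumes>

Each <point> holds an "index,millibel" pair, and ref pulls the point list from a named <reference> element via getReference<VolumeTraits>().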
+
+PolicySerializer::PolicySerializer() : mRootElementName(rootName)
+{
+    std::ostringstream oss;
+    oss << gMajor << "." << gMinor;
+    mVersion = oss.str();
+    ALOGV("%s: Version=%s Root=%s", __FUNCTION__, mVersion.c_str(), mRootElementName.c_str());
+}
+
+status_t PolicySerializer::deserialize(const char *configFile, AudioPolicyConfig &config)
+{
+    xmlDocPtr doc;
+    doc = xmlParseFile(configFile);
+    if (doc == NULL) {
+        ALOGE("%s: Could not parse %s document.", __FUNCTION__, configFile);
+        return BAD_VALUE;
+    }
+    xmlNodePtr cur = xmlDocGetRootElement(doc);
+    if (cur == NULL) {
+        ALOGE("%s: Could not parse %s document: empty.", __FUNCTION__, configFile);
+        xmlFreeDoc(doc);
+        return BAD_VALUE;
+    }
+    if (xmlXIncludeProcess(doc) < 0) {
+         ALOGE("%s: libxml failed to resolve XIncludes on %s document.", __FUNCTION__, configFile);
+    }
+
+    if (xmlStrcmp(cur->name, (const xmlChar *) mRootElementName.c_str()))  {
+        ALOGE("%s: No %s root element found in xml data %s.", __FUNCTION__, mRootElementName.c_str(),
+              (const char *)cur->name);
+        xmlFreeDoc(doc);
+        return BAD_VALUE;
+    }
+
+    string version = getXmlAttribute(cur, versionAttribute);
+    if (version.empty()) {
+        ALOGE("%s: No version found in root node %s", __FUNCTION__, mRootElementName.c_str());
+        xmlFreeDoc(doc);
+        return BAD_VALUE;
+    }
+    if (version != mVersion) {
+        ALOGE("%s: Version does not match; expect %s got %s", __FUNCTION__, mVersion.c_str(),
+              version.c_str());
+        xmlFreeDoc(doc);
+        return BAD_VALUE;
+    }
+    // Let's deserialize the children
+    // Modules
+    ModuleTraits::Collection modules;
+    deserializeCollection<ModuleTraits>(doc, cur, modules, &config);
+    config.setHwModules(modules);
+
+    // deserialize volume section
+    VolumeTraits::Collection volumes;
+    deserializeCollection<VolumeTraits>(doc, cur, volumes, &config);
+    config.setVolumes(volumes);
+
+    // Global Configuration
+    GlobalConfigTraits::deserialize(cur, config);
+
+    xmlFreeDoc(doc);
+    return android::OK;
+}
+
+}; // namespace android
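A hedged end-to-end usage sketch; the configuration path is an assumption, since this patch shows the parser but not its caller:

    static status_t loadConfig(AudioPolicyConfig &config) {
        PolicySerializer serializer;
        // Path is assumed; the patch does not show where the file lives.
        return serializer.deserialize(
                "/system/etc/audio_policy_configuration.xml", config);
    }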
diff --git a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
index b682e2c..b3019e1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
@@ -25,6 +25,8 @@
 #endif
 
 #include "StreamDescriptor.h"
+#include "Gains.h"
+#include "policy.h"
 #include <utils/Log.h>
 #include <utils/String8.h>
 
@@ -35,15 +37,18 @@
 StreamDescriptor::StreamDescriptor()
     :   mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
 {
-    mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, 0);
+    // Initialize the current stream's index to mIndexMax so volume isn't 0 in
+    // cases where the Java layer doesn't call into the audio policy service to
+    // set the default volume.
+    mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, mIndexMax);
 }
 
 int StreamDescriptor::getVolumeIndex(audio_devices_t device) const
 {
     device = Volume::getDeviceForVolume(device);
-    // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT
+    // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
     if (mIndexCur.indexOfKey(device) < 0) {
-        device = AUDIO_DEVICE_OUT_DEFAULT;
+        device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
     }
     return mIndexCur.valueFor(device);
 }
@@ -68,7 +73,7 @@
     mIndexMax = volIndexMax;
 }
 
-void StreamDescriptor::setVolumeCurvePoint(Volume::device_category deviceCategory,
+void StreamDescriptor::setVolumeCurvePoint(device_category deviceCategory,
                                            const VolumeCurvePoint *point)
 {
     mVolumeCurve[deviceCategory] = point;
@@ -118,14 +123,14 @@
 }
 
 void StreamDescriptorCollection::setVolumeCurvePoint(audio_stream_type_t stream,
-                                                     Volume::device_category deviceCategory,
+                                                     device_category deviceCategory,
                                                      const VolumeCurvePoint *point)
 {
     editValueAt(stream).setVolumeCurvePoint(deviceCategory, point);
 }
 
 const VolumeCurvePoint *StreamDescriptorCollection::getVolumeCurvePoint(audio_stream_type_t stream,
-                                                                        Volume::device_category deviceCategory) const
+                                                                        device_category deviceCategory) const
 {
     return valueAt(stream).getVolumeCurvePoint(deviceCategory);
 }
@@ -140,6 +145,65 @@
     return editValueAt(stream).setVolumeIndexMax(volIndexMax);
 }
 
+float StreamDescriptorCollection::volIndexToDb(audio_stream_type_t stream, device_category category,
+                                               int indexInUi) const
+{
+    const StreamDescriptor &streamDesc = valueAt(stream);
+    return Gains::volIndexToDb(streamDesc.getVolumeCurvePoint(category),
+                               streamDesc.getVolumeIndexMin(), streamDesc.getVolumeIndexMax(),
+                               indexInUi);
+}
+
+status_t StreamDescriptorCollection::initStreamVolume(audio_stream_type_t stream,
+                                                      int indexMin, int indexMax)
+{
+    ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
+    if (indexMin < 0 || indexMin >= indexMax) {
+        ALOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d",
+              stream, indexMin, indexMax);
+        return BAD_VALUE;
+    }
+    setVolumeIndexMin(stream, indexMin);
+    setVolumeIndexMax(stream, indexMax);
+    return NO_ERROR;
+}
+
+void StreamDescriptorCollection::initializeVolumeCurves(bool isSpeakerDrcEnabled)
+{
+    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
+        for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
+            setVolumeCurvePoint(static_cast<audio_stream_type_t>(i),
+                                static_cast<device_category>(j),
+                                Gains::sVolumeProfiles[i][j]);
+        }
+    }
+
+    // Check availability of DRC on speaker path: if available, override some of the speaker curves
+    if (isSpeakerDrcEnabled) {
+        setVolumeCurvePoint(AUDIO_STREAM_SYSTEM, DEVICE_CATEGORY_SPEAKER,
+                            Gains::sDefaultSystemVolumeCurveDrc);
+        setVolumeCurvePoint(AUDIO_STREAM_RING, DEVICE_CATEGORY_SPEAKER,
+                            Gains::sSpeakerSonificationVolumeCurveDrc);
+        setVolumeCurvePoint(AUDIO_STREAM_ALARM, DEVICE_CATEGORY_SPEAKER,
+                            Gains::sSpeakerSonificationVolumeCurveDrc);
+        setVolumeCurvePoint(AUDIO_STREAM_NOTIFICATION, DEVICE_CATEGORY_SPEAKER,
+                            Gains::sSpeakerSonificationVolumeCurveDrc);
+        setVolumeCurvePoint(AUDIO_STREAM_MUSIC, DEVICE_CATEGORY_SPEAKER,
+                            Gains::sSpeakerMediaVolumeCurveDrc);
+        setVolumeCurvePoint(AUDIO_STREAM_ACCESSIBILITY, DEVICE_CATEGORY_SPEAKER,
+                            Gains::sSpeakerMediaVolumeCurveDrc);
+    }
+}
+
+void StreamDescriptorCollection::switchVolumeCurve(audio_stream_type_t streamSrc,
+                                                   audio_stream_type_t streamDst)
+{
+    for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
+        setVolumeCurvePoint(streamDst, static_cast<device_category>(j),
+                            Gains::sVolumeProfiles[streamSrc][j]);
+    }
+}
+
 status_t StreamDescriptorCollection::dump(int fd) const
 {
     const size_t SIZE = 256;
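A hedged sketch of the new volume plumbing (stream, bounds, and index values are illustrative, and the collection is assumed to be pre-populated with one descriptor per stream by its constructor, which this hunk does not show):

    StreamDescriptorCollection streams;
    streams.initializeVolumeCurves(false /* isSpeakerDrcEnabled */);
    streams.initStreamVolume(AUDIO_STREAM_MUSIC, 0 /* indexMin */, 15 /* indexMax */);
    // Map a UI volume index to an attenuation via the curve selected above.
    float db = streams.volIndexToDb(AUDIO_STREAM_MUSIC, DEVICE_CATEGORY_SPEAKER, 8);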
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
new file mode 100644
index 0000000..f639551
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TypeConverter.h"
+
+namespace android {
+
+#define MAKE_STRING_FROM_ENUM(string) { #string, string }
+
+template <>
+const DeviceConverter::Table DeviceConverter::mTable[] = {
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_LINE),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPDIF),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_FM),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_IP),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_STUB),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AMBIENT),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LINE),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_SPDIF),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_IP),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUS),
+        MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
+};
+
+template<>
+const size_t DeviceConverter::mSize = sizeof(DeviceConverter::mTable) /
+        sizeof(DeviceConverter::mTable[0]);
+
+
+template <>
+const OutputFlagConverter::Table OutputFlagConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_FAST),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_TTS),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_RAW),
+    MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
+};
+template<>
+const size_t OutputFlagConverter::mSize = sizeof(OutputFlagConverter::mTable) /
+        sizeof(OutputFlagConverter::mTable[0]);
+
+
+template <>
+const InputFlagConverter::Table InputFlagConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_FAST),
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
+    MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
+};
+template<>
+const size_t InputFlagConverter::mSize = sizeof(InputFlagConverter::mTable) /
+        sizeof(InputFlagConverter::mTable[0]);
+
+
+template <>
+const FormatConverter::Table FormatConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_16_BIT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_BIT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_32_BIT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_FLOAT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MP3),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_MAIN),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SSR),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LTP),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V1),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ERLC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V2),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ELD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_VORBIS),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V1),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V2),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_OPUS),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC3),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_E_AC3),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS_HD),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
+};
+template<>
+const size_t FormatConverter::mSize = sizeof(FormatConverter::mTable) /
+        sizeof(FormatConverter::mTable[0]);
+
+
+template <>
+const OutputChannelConverter::Table OutputChannelConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+};
+template<>
+const size_t OutputChannelConverter::mSize = sizeof(OutputChannelConverter::mTable) /
+        sizeof(OutputChannelConverter::mTable[0]);
+
+
+template <>
+const InputChannelConverter::Table InputChannelConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_MONO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+};
+template<>
+const size_t InputChannelConverter::mSize = sizeof(InputChannelConverter::mTable) /
+        sizeof(InputChannelConverter::mTable[0]);
+
+template <>
+const ChannelIndexConverter::Table ChannelIndexConverter::mTable[] = {
+    {"AUDIO_CHANNEL_INDEX_MASK_1", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_1)},
+    {"AUDIO_CHANNEL_INDEX_MASK_2", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_2)},
+    {"AUDIO_CHANNEL_INDEX_MASK_3", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_3)},
+    {"AUDIO_CHANNEL_INDEX_MASK_4", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_4)},
+    {"AUDIO_CHANNEL_INDEX_MASK_5", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_5)},
+    {"AUDIO_CHANNEL_INDEX_MASK_6", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_6)},
+    {"AUDIO_CHANNEL_INDEX_MASK_7", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_7)},
+    {"AUDIO_CHANNEL_INDEX_MASK_8", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_8)},
+};
+template<>
+const size_t ChannelIndexConverter::mSize = sizeof(ChannelIndexConverter::mTable) /
+        sizeof(ChannelIndexConverter::mTable[0]);
+
+
+template <>
+const GainModeConverter::Table GainModeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_JOINT),
+    MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_CHANNELS),
+    MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_RAMP),
+};
+
+template<>
+const size_t GainModeConverter::mSize = sizeof(GainModeConverter::mTable) /
+        sizeof(GainModeConverter::mTable[0]);
+
+template <>
+const DeviceCategoryConverter::Table DeviceCategoryConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_HEADSET),
+    MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_SPEAKER),
+    MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EARPIECE),
+    MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA)
+};
+
+template<>
+const size_t DeviceCategoryConverter::mSize = sizeof(DeviceCategoryConverter::mTable) /
+        sizeof(DeviceCategoryConverter::mTable[0]);
+
+template <>
+const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_VOICE_CALL),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_SYSTEM),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_RING),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_MUSIC),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ALARM),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_NOTIFICATION),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_BLUETOOTH_SCO),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ENFORCED_AUDIBLE),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_DTMF),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_TTS),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ACCESSIBILITY),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_REROUTING),
+    MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
+};
+
+template<>
+const size_t StreamTypeConverter::mSize = sizeof(StreamTypeConverter::mTable) /
+        sizeof(StreamTypeConverter::mTable[0]);
+
+template <class Traits>
+bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
+{
+    for (size_t i = 0; i < mSize; i++) {
+        if (mTable[i].value == value) {
+            str = mTable[i].literal;
+            return true;
+        }
+    }
+    return false;
+}
+
+template <class Traits>
+bool TypeConverter<Traits>::fromString(const std::string &str, typename Traits::Type &result)
+{
+    for (size_t i = 0; i < mSize; i++) {
+        if (strcmp(mTable[i].literal, str.c_str()) == 0) {
+            ALOGV("stringToEnum() found %s", mTable[i].literal);
+            result = mTable[i].value;
+            return true;
+        }
+    }
+    return false;
+}
+
+template <class Traits>
+void TypeConverter<Traits>::collectionFromString(const std::string &str,
+                                                 typename Traits::Collection &collection,
+                                                 const char *del)
+{
+    char *literal = strdup(str.c_str());
+
+    for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+        typename Traits::Type value;
+        if (fromString(cstr, value)) {
+            collection.add(value);
+        }
+    }
+    free(literal);
+}
+
+template <class Traits>
+uint32_t TypeConverter<Traits>::maskFromString(const std::string &str, const char *del)
+{
+    char *literal = strdup(str.c_str());
+    uint32_t value = 0;
+    for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+        typename Traits::Type type;
+        if (fromString(cstr, type)) {
+            value |= static_cast<uint32_t>(type);
+        }
+    }
+    free(literal);
+    return value;
+}
+
+template class TypeConverter<DeviceTraits>;
+template class TypeConverter<OutputFlagTraits>;
+template class TypeConverter<InputFlagTraits>;
+template class TypeConverter<FormatTraits>;
+template class TypeConverter<OutputChannelTraits>;
+template class TypeConverter<InputChannelTraits>;
+template class TypeConverter<ChannelIndexTraits>;
+template class TypeConverter<GainModeTraits>;
+template class TypeConverter<StreamTraits>;
+template class TypeConverter<DeviceCategoryTraits>;
+
+}; // namespace android
+
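A minimal usage sketch of the converters defined above (an illustration, not code from this
patch: it assumes the TypeConverter.h header introduced elsewhere in this change declares the
*Converter aliases as shown, and that <system/audio.h> provides the enums):

    #include <string>

    void convertExamples() {
        std::string literal;
        if (StreamTypeConverter::toString(AUDIO_STREAM_MUSIC, literal)) {
            // literal now holds "AUDIO_STREAM_MUSIC"
        }
        audio_stream_type_t stream;
        if (StreamTypeConverter::fromString("AUDIO_STREAM_ALARM", stream)) {
            // stream now equals AUDIO_STREAM_ALARM
        }
        // maskFromString ORs every recognized literal together, matching the
        // comma-separated channelMasks lists used in the XML files below.
        uint32_t mask = OutputChannelConverter::maskFromString(
                "AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO", ",");
        (void) mask;
    }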
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
new file mode 100644
index 0000000..ab2b51f
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::VolumeCurve"
+//#define LOG_NDEBUG 0
+
+#include "VolumeCurve.h"
+#include "TypeConverter.h"
+
+namespace android {
+
+float VolumeCurve::volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const
+{
+    ALOG_ASSERT(!mCurvePoints.isEmpty(), "Invalid volume curve");
+
+    size_t nbCurvePoints = mCurvePoints.size();
+    // the volume index in the UI is relative to the min and max volume indices for this stream
+    int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
+    int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
+
+    // Find where this volume index would be inserted in the curve points
+    size_t indexInUiPosition = mCurvePoints.orderOf(CurvePoint(volIdx, 0));
+    if (indexInUiPosition >= nbCurvePoints) {
+        return 0.0f; // out of bounds
+    }
+    if (indexInUiPosition == 0) {
+        if (volIdx != mCurvePoints[0].mIndex) {
+            return VOLUME_MIN_DB; // out of bounds
+        }
+        return mCurvePoints[0].mAttenuationInMb / 100.0f;
+    }
+    // linear interpolation in the attenuation table in dB
+    float decibels = (mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f) +
+            ((float)(volIdx - mCurvePoints[indexInUiPosition - 1].mIndex)) *
+                ( ((mCurvePoints[indexInUiPosition].mAttenuationInMb / 100.0f) -
+                        (mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f)) /
+                    ((float)(mCurvePoints[indexInUiPosition].mIndex -
+                            mCurvePoints[indexInUiPosition - 1].mIndex)) );
+
+    ALOGV("VOLUME mDeviceCategory %d, mStreamType %d vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
+            mDeviceCategory, mStreamType,
+            mCurvePoints[indexInUiPosition - 1].mIndex, volIdx,
+            mCurvePoints[indexInUiPosition].mIndex,
+            ((float)mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f), decibels,
+            ((float)mCurvePoints[indexInUiPosition].mAttenuationInMb / 100.0f));
+
+    return decibels;
+}
+
+void VolumeCurve::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+    snprintf(buffer, SIZE, " {");
+    result.append(buffer);
+    for (size_t i = 0; i < mCurvePoints.size(); i++) {
+        snprintf(buffer, SIZE, "(%3d, %5d)",
+                 mCurvePoints[i].mIndex, mCurvePoints[i].mAttenuationInMb);
+        result.append(buffer);
+        result.append(i == (mCurvePoints.size() - 1) ? " }\n" : ", ");
+    }
+    write(fd, result.string(), result.size());
+}
+
+void VolumeCurvesForStream::dump(int fd, int spaces, bool curvePoints) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    if (!curvePoints) {
+        snprintf(buffer, SIZE, "%s         %02d         %02d         ",
+                 mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+        result.append(buffer);
+        for (size_t i = 0; i < mIndexCur.size(); i++) {
+            snprintf(buffer, SIZE, "%04x : %02d, ", mIndexCur.keyAt(i), mIndexCur.valueAt(i));
+            result.append(buffer);
+        }
+        result.append("\n");
+        write(fd, result.string(), result.size());
+        return;
+    }
+
+    for (size_t i = 0; i < size(); i++) {
+        std::string deviceCatLiteral;
+        DeviceCategoryConverter::toString(keyAt(i), deviceCatLiteral);
+        snprintf(buffer, SIZE, "%*s %s :",
+                 spaces, "", deviceCatLiteral.c_str());
+        write(fd, buffer, strlen(buffer));
+        valueAt(i)->dump(fd);
+    }
+    result.append("\n");
+    write(fd, result.string(), result.size());
+}
+
+status_t VolumeCurvesCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nStreams dump:\n");
+    write(fd, buffer, strlen(buffer));
+    snprintf(buffer, SIZE,
+             " Stream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, " %02zu      ", i);
+        write(fd, buffer, strlen(buffer));
+        valueAt(i).dump(fd);
+    }
+    snprintf(buffer, SIZE, "\nVolume Curves for Use Cases (aka Stream types) dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        std::string streamTypeLiteral;
+        StreamTypeConverter::toString(keyAt(i), streamTypeLiteral);
+        snprintf(buffer, SIZE,
+                 " %s (%02zu): Curve points for device category (index, attenuation in millibel)\n",
+                 streamTypeLiteral.c_str(), i);
+        write(fd, buffer, strlen(buffer));
+        valueAt(i).dump(fd, 2, true);
+    }
+
+    return NO_ERROR;
+}
+
+}; // namespace android
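As a hand-worked check of the interpolation in volIndexToDb() above (assuming volIndexMin = 0,
volIndexMax = 100 and the DEFAULT_MEDIA_VOLUME_CURVE points (1,-5800) (20,-4000) (60,-1700)
(100,0) defined later in this patch):

    indexInUi = 40  =>  nbSteps = 1 + 100 - 1 = 100
                        volIdx  = 100 * (40 - 0) / (100 - 0) = 40
    volIdx 40 falls between curve points (20,-4000) and (60,-1700), so:
        dB = -40.0 + (40 - 20) * ((-17.0 - (-40.0)) / (60 - 20))
           = -40.0 + 20 * 0.575 = -28.5 dB of attenuation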
diff --git a/services/audiopolicy/config/a2dp_audio_policy_configuration.xml b/services/audiopolicy/config/a2dp_audio_policy_configuration.xml
new file mode 100644
index 0000000..ced7463
--- /dev/null
+++ b/services/audiopolicy/config/a2dp_audio_policy_configuration.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- A2dp Audio HAL Audio Policy Configuration file -->
+<module name="a2dp" halVersion="2.0">
+    <mixPorts>
+        <mixPort name="a2dp output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="a2dp input" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+        </mixPort>
+    </mixPorts>
+    <devicePorts>
+        <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP In" type="AUDIO_DEVICE_IN_BLUETOOTH_A2DP" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+        </devicePort>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="BT A2DP Out"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Headphones"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Speaker"
+               sources="a2dp output"/>
+        <route type="mix" sink="a2dp input"
+               sources="BT A2DP In"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
new file mode 100644
index 0000000..7af2f81
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -0,0 +1,216 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <!-- The version section contains a “version” tag in the form “major.minor”, e.g. version=“1.0” -->
+
+    <!-- Global configuration Declaration -->
+    <globalConfiguration speaker_drc_enabled="true"/>
+
+
+    <!-- Modules section:
+        There is one section per audio HW module present on the platform.
+        Each module section contains two mandatory tags for the audio HAL: “halVersion” and “name”.
+        The module names are the same as in current .conf file:
+                “primary”, “A2DP”, “remote_submix”, “USB”
+        Each module will contain the following sections:
+        “devicePorts”: a list of device descriptors for all input and output devices accessible via this
+        module.
+        This contains both permanently attached devices and removable devices.
+        “mixPorts”: listing all output and input streams exposed by the audio HAL
+        “routes”: list of possible connections between input and output devices or between stream and
+        devices.
+            "route": is defined by an attribute:
+                -"type": <mux|mix> means all sources are mutual exclusive (mux) or can be mixed (mix)
+                -"sink": the sink involved in this route
+                -"sources": all the sources than can be connected to the sink via vis route
+        “attachedDevices”: permanently attached devices.
+        The attachedDevices section is a list of device names. The names correspond to device names
+        defined in <devicePorts> section.
+        “defaultOutputDevice”: device to be used by default when no policy rule applies
+    -->
+    <modules>
+        <!-- Primary Audio HAL -->
+        <module name="primary" halVersion="3.0">
+            <attachedDevices>
+                <item>Speaker</item>
+                <item>Built-In Mic</item>
+                <item>Built-In Back Mic</item>
+            </attachedDevices>
+            <defaultOutputDevice>Speaker</defaultOutputDevice>
+            <mixPorts>
+                <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="deep_buffer" role="source"
+                        flags="AUDIO_OUTPUT_FLAG_DEEP_BUFFER">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="compressed_offload" role="source"
+                         flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+                    <profile name="" format="AUDIO_FORMAT_MP3"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+                    <profile name="" format="AUDIO_FORMAT_AAC"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+                    <profile name="" format="AUDIO_FORMAT_AAC_LC"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+                </mixPort>
+                <mixPort name="voice_tx" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </mixPort>
+                <mixPort name="primary input" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+                </mixPort>
+                <mixPort name="voice_rx" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+                </mixPort>
+            </mixPorts>
+            <devicePorts>
+                <!-- Output devices declaration, i.e. Sink DEVICE PORT -->
+                <devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </devicePort>
+                <devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                    <gains>
+                        <gain name="gain_1" mode="AUDIO_GAIN_MODE_JOINT"
+                              minValueMB="-8400"
+                              maxValueMB="4000"
+                              defaultValueMB="0"
+                              stepValueMB="100"/>
+                    </gains>
+                </devicePort>
+                <devicePort tagName="Wired Headset" type="AUDIO_DEVICE_OUT_WIRED_HEADSET" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </devicePort>
+                <devicePort tagName="Wired Headphones" type="AUDIO_DEVICE_OUT_WIRED_HEADPHONE" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </devicePort>
+                <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </devicePort>
+                <devicePort tagName="BT SCO Headset" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </devicePort>
+                <devicePort tagName="BT SCO Car Kit" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </devicePort>
+                <devicePort tagName="Telephony Tx" type="AUDIO_DEVICE_OUT_TELEPHONY_TX" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </devicePort>
+
+                <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+                </devicePort>
+                <devicePort tagName="Built-In Back Mic" type="AUDIO_DEVICE_IN_BACK_MIC" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+                </devicePort>
+                <devicePort tagName="Wired Headset Mic" type="AUDIO_DEVICE_IN_WIRED_HEADSET" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+                </devicePort>
+                <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+                </devicePort>
+                <devicePort tagName="Telephony Rx" type="AUDIO_DEVICE_IN_TELEPHONY_RX" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+                </devicePort>
+            </devicePorts>
+            <!-- route declaration, i.e. list all available sources for a given sink -->
+            <routes>
+                <route type="mix" sink="Earpiece"
+                       sources="primary output,deep_buffer,BT SCO Headset Mic"/>
+                <route type="mix" sink="Speaker"
+                       sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+                <route type="mix" sink="Wired Headset"
+                       sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+                <route type="mix" sink="Wired Headphones"
+                       sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+                <route type="mix" sink="Telephony Tx"
+                       sources="voice_tx"/>
+                <route type="mix" sink="primary input"
+                       sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
+                <route type="mix" sink="Telephony Tx"
+                       sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
+                <route type="mix" sink="voice_rx"
+                       sources="Telephony Rx"/>
+            </routes>
+
+        </module>
+
+        <!-- HDMI Audio HAL -->
+        <module description="HDMI Audio HAL" name="hdmi" halVersion="2.0">
+            <mixPorts>
+                <mixPort name="hdmi output" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000"/>
+                </mixPort>
+            </mixPorts>
+            <devicePorts>
+                <devicePort tagName="HDMI Out" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </devicePort>
+            </devicePorts>
+            <routes>
+                <route type="mix" sink="HDMI Out"
+                       sources="hdmi output"/>
+            </routes>
+        </module>
+
+        <!-- A2dp Audio HAL -->
+        <xi:include href="a2dp_audio_policy_configuration.xml"/>
+
+        <!-- Usb Audio HAL -->
+        <xi:include href="usb_audio_policy_configuration.xml"/>
+
+        <!-- Remote Submix Audio HAL -->
+        <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+    </modules>
+    <!-- End of Modules section -->
+
+    <!-- Volume section -->
+
+    <xi:include href="audio_policy_volumes.xml"/>
+    <xi:include href="default_volume_tables.xml"/>
+
+    <!-- End of Volume section -->
+
+</audioPolicyConfiguration>
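The top-level configuration relies on XInclude (the xmlns:xi declaration) to pull in the
per-HAL module files and the volume tables. A hedged sketch of how a consumer could expand
those includes with libxml2, which this patch adds as a build dependency further down;
loadPolicyConfig is a hypothetical helper, not the serializer actually used:

    #include <libxml/parser.h>
    #include <libxml/xinclude.h>

    // Parse the policy file, then expand every <xi:include> in place.
    xmlDocPtr loadPolicyConfig(const char *path) {
        xmlDocPtr doc = xmlReadFile(path, NULL, 0);
        if (doc == NULL) {
            return NULL;
        }
        // Returns the number of substitutions performed, or -1 on error.
        if (xmlXIncludeProcess(doc) < 0) {
            xmlFreeDoc(doc);
            return NULL;
        }
        return doc;  // caller walks <modules>/<volumes>, frees with xmlFreeDoc()
    }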
diff --git a/services/audiopolicy/config/audio_policy_configuration_stub.xml b/services/audiopolicy/config/audio_policy_configuration_stub.xml
new file mode 100644
index 0000000..a7747f8
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_stub.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2016 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <modules>
+        <module name="stub" halVersion="2.0">
+            <attachedDevices>
+                <item>Default Out</item>
+                <item>Default In</item>
+            </attachedDevices>
+            <defaultOutputDevice>Default Out</defaultOutputDevice>
+            <mixPorts>
+                <mixPort name="stub output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+
+                <mixPort name="stub input" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+                </mixPort>
+            </mixPorts>
+            <devicePorts>
+                <devicePort tagName="Default Out" type="AUDIO_DEVICE_OUT_STUB" role="sink">
+                </devicePort>
+
+                <devicePort tagName="Default In" type="AUDIO_DEVICE_IN_STUB" role="source">
+                </devicePort>
+            </devicePorts>
+            <routes>
+                <route type="mix" sink="Default Out" sources="stub output"/>
+
+                <route type="mix" sink="stub input" sources="Default In"/>
+            </routes>
+
+        </module>
+
+        <!-- Remote Submix Audio HAL -->
+        <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+    </modules>
+
+    <xi:include href="audio_policy_volumes.xml"/>
+    <xi:include href="default_volume_tables.xml"/>
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_volumes.xml b/services/audiopolicy/config/audio_policy_volumes.xml
new file mode 100644
index 0000000..43a47b0
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_volumes.xml
@@ -0,0 +1,179 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- The volume section defines a volume curve for a given use case and device category.
+It contains a list of points on this curve, each expressing the attenuation in millibels for a
+given volume index from 0 to 100.
+<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="">
+<point>0,-9600</point>
+<point>100,0</point>
+</volume>
+-->
+
+<volumes>
+    <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_HEADSET">
+        <point>0,-4200</point>
+        <point>33,-2800</point>
+        <point>66,-1400</point>
+        <point>100,0</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_SPEAKER">
+        <point>0,-2400</point>
+        <point>33,-1600</point>
+        <point>66,-800</point>
+        <point>100,0</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EARPIECE">
+        <point>0,-2400</point>
+        <point>33,-1600</point>
+        <point>66,-800</point>
+        <point>100,0</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                             ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEADSET">
+        <point>1,-3000</point>
+        <point>33,-2600</point>
+        <point>66,-2200</point>
+        <point>100,-1800</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+                                         ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                         ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                         ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_HEADSET"
+                                       ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_SPEAKER">
+        <point>1,-2970</point>
+        <point>33,-2010</point>
+        <point>66,-1020</point>
+        <point>100,0</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                       ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                       ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_HEADSET"
+                                        ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+                                        ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                        ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                        ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_HEADSET"
+                                        ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_SPEAKER">
+        <point>1,-2970</point>
+        <point>33,-2010</point>
+        <point>66,-1020</point>
+        <point>100,0</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                        ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                        ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_HEADSET"
+                                               ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_SPEAKER">
+        <point>1,-2970</point>
+        <point>33,-2010</point>
+        <point>66,-1020</point>
+        <point>100,0</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                               ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                               ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_HEADSET">
+        <point>0,-4200</point>
+        <point>33,-2800</point>
+        <point>66,-1400</point>
+        <point>100,0</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_SPEAKER">
+        <point>0,-2400</point>
+        <point>33,-1600</point>
+        <point>66,-800</point>
+        <point>100,0</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_EARPIECE">
+        <point>0,-4200</point>
+        <point>33,-2800</point>
+        <point>66,-1400</point>
+        <point>100,0</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_HEADSET">
+        <point>1,-3000</point>
+        <point>33,-2600</point>
+        <point>66,-2200</point>
+        <point>100,-1800</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+                                                   ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                                   ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                                   ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_HEADSET">
+        <point>1,-3000</point>
+        <point>33,-2600</point>
+        <point>66,-2200</point>
+        <point>100,-1800</point>
+    </volume>
+    <volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+                                       ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                       ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                       ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_HEADSET"
+                                      ref="SILENT_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+                                      ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                      ref="SILENT_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                      ref="SILENT_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_HEADSET"
+                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+                                                ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                                ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_HEADSET"
+                                            ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+                                            ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                            ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                            ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_HEADSET"
+                                        ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_SPEAKER"
+                                        ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_EARPIECE"
+                                        ref="FULL_SCALE_VOLUME_CURVE"/>
+    <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
+                                        ref="FULL_SCALE_VOLUME_CURVE"/>
+</volumes>
+
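Most entries above delegate to a named curve through the ref attribute instead of listing
points inline. Assuming the XML serializer resolves ref against the <reference> elements of
the next file (that code is not part of this section), an entry such as

    <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_HEADSET"
            ref="DEFAULT_MEDIA_VOLUME_CURVE"/>

behaves as if the four DEFAULT_MEDIA_VOLUME_CURVE points (1,-5800) (20,-4000) (60,-1700)
(100,0) were written inline.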
diff --git a/services/audiopolicy/config/default_volume_tables.xml b/services/audiopolicy/config/default_volume_tables.xml
new file mode 100644
index 0000000..9a22b1d
--- /dev/null
+++ b/services/audiopolicy/config/default_volume_tables.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- Default Volume Tables included by Audio Policy Configuration file -->
+<!-- Full Default Volume table for all device categories -->
+<volumes>
+    <reference name="FULL_SCALE_VOLUME_CURVE">
+    <!-- Full Scale reference Volume Curve -->
+        <point>0,0</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="SILENT_VOLUME_CURVE">
+        <point>0,-9600</point>
+        <point>100,-9600</point>
+    </reference>
+    <reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
+    <!-- Default System reference Volume Curve -->
+        <point>1,-2400</point>
+        <point>33,-1800</point>
+        <point>66,-1200</point>
+        <point>100,-600</point>
+    </reference>
+    <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+    <!-- Default Media reference Volume Curve -->
+        <point>1,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
+    <!-- Default Volume Curve -->
+        <point>1,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
+    <!-- Default is Speaker Media Volume Curve -->
+        <point>1,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
+    <!-- Default Volume Curve -->
+        <point>1,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
+    <!-- Default is Ext Media System Volume Curve -->
+        <point>1,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-2100</point>
+        <point>100,-1000</point>
+    </reference>
+</volumes>
diff --git a/services/audiopolicy/config/r_submix_audio_policy_configuration.xml b/services/audiopolicy/config/r_submix_audio_policy_configuration.xml
new file mode 100644
index 0000000..dc2a5ec
--- /dev/null
+++ b/services/audiopolicy/config/r_submix_audio_policy_configuration.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- Remote Submix Audio Policy Configuration file -->
+<module name="r_submix" halVersion="2.0">
+    <attachedDevices>
+        <item>Remote Submix In</item>
+    </attachedDevices>
+    <mixPorts>
+        <mixPort name="r_submix output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="r_submix input" role="sink">
+           <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                    samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+        </mixPort>
+   </mixPorts>
+   <devicePorts>
+       <devicePort tagName="Remote Submix Out" type="AUDIO_DEVICE_OUT_REMOTE_SUBMIX"  role="sink">
+           <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                    samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+       </devicePort>
+       <devicePort tagName="Remote Submix In" type="AUDIO_DEVICE_IN_REMOTE_SUBMIX"  role="source">
+           <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                    samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+        </devicePort>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="Remote Submix Out"
+               sources="r_submix output"/>
+        <route type="mix" sink="r_submix input"
+               sources="Remote Submix In"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/config/usb_audio_policy_configuration.xml b/services/audiopolicy/config/usb_audio_policy_configuration.xml
new file mode 100644
index 0000000..1630a94
--- /dev/null
+++ b/services/audiopolicy/config/usb_audio_policy_configuration.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- USB Audio HAL Audio Policy Configuration file -->
+
+<module name="usb" halVersion="2.0">
+    <mixPorts>
+        <mixPort name="usb_accessory output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="usb_device output" role="source"/>
+        <mixPort name="usb_device input" role="sink"/>
+    </mixPorts>
+    <devicePorts>
+        <devicePort tagName="USB Host Out" type="AUDIO_DEVICE_OUT_USB_ACCESSORY" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="USB Device Out" type="AUDIO_DEVICE_OUT_USB_DEVICE" role="sink"/>
+        <devicePort tagName="USB Device In" type="AUDIO_DEVICE_IN_USB_DEVICE" role="source"/>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="USB Host Out"
+               sources="usb_accessory output"/>
+        <route type="mix" sink="USB Device Out"
+               sources="usb_device output"/>
+        <route type="mix" sink="usb_device input"
+               sources="USB Device In"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
index e73e543..567ff9e 100755
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
@@ -133,37 +133,6 @@
     virtual status_t setDeviceConnectionState(const android::sp<android::DeviceDescriptor> devDesc,
                                               audio_policy_dev_state_t state) = 0;
 
-    /**
-     * Translate a volume index given by the UI to an amplification value in dB for a stream type
-     * and a device category.
-     *
-     * @param[in] deviceCategory for which the conversion is requested.
-     * @param[in] stream type for which the conversion is requested.
-     * @param[in] indexInUi index received from the UI to be translated.
-     *
-     * @return amplification value in dB matching the UI index for this given device and stream.
-     */
-    virtual float volIndexToDb(Volume::device_category deviceCategory, audio_stream_type_t stream,
-                                 int indexInUi) = 0;
-
-    /**
-     * Initialize the min / max index of volume applicable for a given stream type. These indexes
-     * will be used upon conversion of UI index to volume amplification.
-     *
-     * @param[in] stream type for which the indexes need to be set
-     * @param[in] indexMin Minimum index allowed for this stream.
-     * @param[in] indexMax Maximum index allowed for this stream.
-     */
-    virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) = 0;
-
-    /**
-     * Initialize volume curves for each strategy and device category
-     *
-     * @param[in] isSpeakerDrcEnabled true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER
-                  path to boost soft sounds, used to adjust volume curves accordingly
-     */
-    virtual void initializeVolumeCurves(bool isSpeakerDrcEnabled) = 0;
-
 protected:
     virtual ~AudioPolicyManagerInterface() {}
 };
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index 6d43df2..846fa48 100755
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -16,6 +16,7 @@
 
 #pragma once
 
+#include <IVolumeCurvesCollection.h>
 #include <AudioGain.h>
 #include <AudioPort.h>
 #include <AudioPatch.h>
@@ -25,7 +26,6 @@
 #include <AudioOutputDescriptor.h>
 #include <AudioPolicyMix.h>
 #include <SoundTriggerSession.h>
-#include <StreamDescriptor.h>
 
 namespace android {
 
@@ -51,7 +51,7 @@
 
     virtual const DeviceVector &getAvailableInputDevices() const = 0;
 
-    virtual StreamDescriptorCollection &getStreamDescriptors() = 0;
+    virtual IVolumeCurvesCollection &getVolumeCurves() = 0;
 
     virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const = 0;
 
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
index b18c520..6dba75b 100755
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -35,13 +35,15 @@
     $(call include-path-for, audio-utils) \
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 LOCAL_MODULE := libaudiopolicyengineconfigurable
 LOCAL_MODULE_TAGS := optional
 LOCAL_STATIC_LIBRARIES := \
     libmedia_helper \
     libaudiopolicypfwwrapper \
-    libaudiopolicycomponents
+    libaudiopolicycomponents \
+    libxml2
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
diff --git a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
index 74daba5..759d0c9 100755
--- a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
+++ b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
@@ -111,13 +111,12 @@
      * Set the strategy to be followed by a stream.
      *
      * @param[in] stream: name of the stream for which the strategy to use has to be set
-     * @param[in] strategy to follow for the given stream.
+     * @param[in] volumeProfile to follow for the given stream.
      *
-     * @return true if the strategy were set correclty for this stream, false otherwise.
+     * @return true if the profile was set correctly for this stream, false otherwise.
      */
     virtual bool setVolumeProfileForStream(const audio_stream_type_t &stream,
-                                           Volume::device_category category,
-                                           const VolumeCurvePoints &points) = 0;
+                                           const audio_stream_type_t &volumeProfile) = 0;
 
     /**
      * Set the strategy to be followed by a usage.
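A hedged reading of the revised contract above: rather than installing raw curve points per
device category, the engine now aliases one stream's volume behavior to another stream's
profile. For illustration only (plugin is a hypothetical AudioPolicyPluginInterface pointer;
no caller appears in this hunk):

    // Make DTMF reuse the voice-call volume profile.
    bool ok = plugin->setVolumeProfileForStream(AUDIO_STREAM_DTMF,
                                                AUDIO_STREAM_VOICE_CALL);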
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk
deleted file mode 100644
index e9b1902..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk
+++ /dev/null
@@ -1,105 +0,0 @@
-################################################################################################
-#
-# @NOTE:
-# Audio Policy Engine configurable example for generic device build
-#
-# Any vendor shall have its own configuration within the corresponding device folder
-#
-################################################################################################
-
-
-LOCAL_PATH := $(call my-dir)
-
-PFW_CORE := external/parameter-framework
-BUILD_PFW_SETTINGS := $(PFW_CORE)/support/android/build_pfw_settings.mk
-PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/Schemas
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-##################################################################
-# CONFIGURATION FILES
-##################################################################
-######### Policy PFW top level file #########
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := ParameterFrameworkConfigurationPolicy.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework
-LOCAL_SRC_FILES := $(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-
-########## Policy PFW Structures #########
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicyClass.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicySubsystem.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_REQUIRED_MODULES := \
-    PolicySubsystem-CommonTypes.xml \
-    PolicySubsystem-Volume.xml \
-    libpolicy-subsystem \
-
-LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicySubsystem-CommonTypes.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicySubsystem-Volume.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-######### Policy PFW Settings #########
-include $(CLEAR_VARS)
-LOCAL_MODULE := parameter-framework.policy
-LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
-LOCAL_ADDITIONAL_DEPENDENCIES := \
-        PolicyClass.xml \
-        PolicySubsystem.xml \
-        ParameterFrameworkConfigurationPolicy.xml
-
-ifeq ($(pfw_rebuild_settings),true)
-PFW_TOPLEVEL_FILE := $(TARGET_OUT_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_CRITERIA_FILE := $(LOCAL_PATH)/policy_criteria.txt
-PFW_EDD_FILES := \
-        $(LOCAL_PATH)/Settings/device_for_strategy_media.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_phone.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_sonification.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_sonification_respectful.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_dtmf.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_enforced_audible.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_transmitted_through_speaker.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_accessibility.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_rerouting.pfw \
-        $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
-        $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
-        $(LOCAL_PATH)/Settings/device_for_input_source.pfw \
-        $(LOCAL_PATH)/Settings/volumes.pfw
-
-include $(BUILD_PFW_SETTINGS)
-else
-# Use the existing file
-LOCAL_SRC_FILES := Settings/$(LOCAL_MODULE_STEM)
-include $(BUILD_PREBUILT)
-endif # pfw_rebuild_settings
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_accessibility.pfw
deleted file mode 100644
index e8ab33b..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_accessibility.pfw
+++ /dev/null
@@ -1,302 +0,0 @@
-supDomain: DeviceForStrategy
-
-	supDomain: Accessibility
-		#
-		# @FIXME: STRATEGY_ACCESSIBILITY follows STRATEGY_MEDIA for now
-		#
-		# @FIXME: How to disable HDMI if !audio_is_linear_pcm other than programmatically???
-		#
-		domain: UnreachableDevices
-			conf: Calibration
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					hdmi_arc = 0
-					spdif = 0
-					aux_line = 0
-					fm = 0
-					speaker_safe = 0
-					earpiece = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
-					bluetooth_sco_carkit = 0
-					telephony_tx = 0
-
-		domain: Device2
-			conf: RemoteSubmix
-				AvailableOutputDevices Includes RemoteSubmix
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 1
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: BluetoothA2dp
-				ForceUseForMedia IsNot ForceNoBtA2dp
-				AvailableOutputDevices Includes BluetoothA2dp
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 1
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: BluetoothA2dpHeadphone
-				ForceUseForMedia IsNot ForceNoBtA2dp
-				AvailableOutputDevices Includes BluetoothA2dpHeadphones
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 1
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: BluetoothA2dpSpeaker
-				ForceUseForMedia IsNot ForceNoBtA2dp
-				AvailableOutputDevices Includes BluetoothA2dpSpeaker
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 1
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: ForceSpeaker
-				ForceUseForMedia Is ForceSpeaker
-				AvailableOutputDevices Includes Speaker
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 1
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: WiredHeadphone
-				AvailableOutputDevices Includes WiredHeadphone
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 1
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: Line
-				AvailableOutputDevices Includes Line
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 1
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: WiredHeadset
-				AvailableOutputDevices Includes WiredHeadset
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 1
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: UsbAccessory
-				AvailableOutputDevices Includes UsbAccessory
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 1
-					usb_device = 0
-					hdmi = 0
-
-			conf: UsbDevice
-				AvailableOutputDevices Includes UsbDevice
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 1
-					hdmi = 0
-
-			conf: DgtlDockHeadset
-				AvailableOutputDevices Includes DgtlDockHeadset
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 1
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: AuxDigital
-				#
-				# Do not route accessibility prompts to a digital output currently configured with a
-				# compressed format as they would likely not be mixed and dropped.
-				#
-				# @TODO How to translate the following condition(???)
-				# desc->isActive() && !audio_is_linear_pcm(desc->mFormat) && devices != AUDIO_DEVICE_NONE
-				#
-				AvailableOutputDevices Includes Hdmi
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 1
-
-			conf: AnlgDockHeadset
-				AvailableOutputDevices Includes AnlgDockHeadset
-				ForceUseForDock Is ForceAnalogDock
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 1
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: Speaker
-				AvailableOutputDevices Includes Speaker
-
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 1
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: Default
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
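
The deleted device_for_strategy_accessibility.pfw above encodes an ordered first-match cascade: within the Device2 domain the framework walks the conf: entries top to bottom (RemoteSubmix, BluetoothA2dp, ..., Speaker) and applies the first one whose criteria hold, falling back to conf: Default. A minimal C++ sketch of that selection shape — Conf and selectOutputDevices are hypothetical names, not parameter-framework APIs:

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <vector>

    // Illustrative sketch of the first-match cascade encoded by the
    // "conf:" entries above; these names are hypothetical.
    struct Conf {
        std::string name;                  // e.g. "WiredHeadset"
        std::function<bool()> applicable;  // e.g. "AvailableOutputDevices Includes WiredHeadset"
        uint32_t deviceMask;               // bits of selected_output_devices/mask
    };

    uint32_t selectOutputDevices(const std::vector<Conf>& cascade,
                                 uint32_t defaultMask) {
        for (const Conf& conf : cascade) {
            if (conf.applicable()) {
                return conf.deviceMask;    // first matching conf wins
            }
        }
        return defaultMask;                // the trailing "conf: Default"
    }
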
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/volumes.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/volumes.pfw
deleted file mode 100644
index 1049564..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/volumes.pfw
+++ /dev/null
@@ -1,545 +0,0 @@
-supDomain: VolumeProfilesForStream
-	domain: Calibration
-		conf: Calibration
-			component: /Policy/policy/streams
-				component: voice_call/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: speaker_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -16.0
-						2/index = 66
-						2/db_attenuation = -8.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-
-				component: system/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -30.0
-						1/index = 33
-						1/db_attenuation = -26.0
-						2/index = 66
-						2/db_attenuation = -22.0
-						3/index = 100
-						3/db_attenuation = -18.0
-					component: speaker_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: earpiece_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -21.0
-						3/index = 100
-						3/db_attenuation = -10.0
-
-				component: ring/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -49.5
-						1/index = 33
-						1/db_attenuation = -33.5
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: speaker_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -35.7
-						1/index = 33
-						1/db_attenuation = -26.1
-						2/index = 66
-						2/db_attenuation = -13.2
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -49.5
-						1/index = 33
-						1/db_attenuation = -33.5
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -27.0
-						3/index = 100
-						3/db_attenuation = -10.0
-
-				component: music/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: speaker_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -56.0
-						1/index = 33
-						1/db_attenuation = -34.0
-						2/index = 66
-						2/db_attenuation = -11.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-
-				component: alarm/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -49.5
-						1/index = 33
-						1/db_attenuation = -33.5
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: speaker_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -35.7
-						1/index = 33
-						1/db_attenuation = -26.1
-						2/index = 66
-						2/db_attenuation = -13.2
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -49.5
-						1/index = 33
-						1/db_attenuation = -33.5
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -27.0
-						3/index = 100
-						3/db_attenuation = -10.0
-
-				component: notification/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -49.5
-						1/index = 33
-						1/db_attenuation = -33.5
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: speaker_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -35.7
-						1/index = 33
-						1/db_attenuation = -26.1
-						2/index = 66
-						2/db_attenuation = -13.2
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -49.5
-						1/index = 33
-						1/db_attenuation = -33.5
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -27.0
-						3/index = 100
-						3/db_attenuation = -10.0
-
-				component: bluetooth_sco/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: speaker_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -16.0
-						2/index = 66
-						2/db_attenuation = -8.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-
-				component: enforced_audible/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -30.0
-						1/index = 33
-						1/db_attenuation = -26.0
-						2/index = 66
-						2/db_attenuation = -22.0
-						3/index = 100
-						3/db_attenuation = -18.0
-					component: speaker_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: earpiece_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -27.0
-						3/index = 100
-						3/db_attenuation = -10.0
-
-				component: tts/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -96.0
-						1/index = 1
-						1/db_attenuation = -96.0
-						2/index = 2
-						2/db_attenuation = -96.0
-						3/index = 100
-						3/db_attenuation = -96.0
-					component: speaker_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -96.0
-						1/index = 33
-						1/db_attenuation = -68.0
-						2/index = 66
-						2/db_attenuation = -34.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -96.0
-						1/index = 1
-						1/db_attenuation = -96.0
-						2/index = 2
-						2/db_attenuation = -96.0
-						3/index = 100
-						3/db_attenuation = -96.0
-					component: extmedia_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -96.0
-						1/index = 1
-						1/db_attenuation = -96.0
-						2/index = 2
-						2/db_attenuation = -96.0
-						3/index = 100
-						3/db_attenuation = -96.0
-
-				component: accessibility/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: speaker_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -56.0
-						1/index = 33
-						1/db_attenuation = -34.0
-						2/index = 66
-						2/db_attenuation = -11.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-
-				component: rerouting/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = 0.0
-						1/index = 1
-						1/db_attenuation = 0.0
-						2/index = 2
-						2/db_attenuation = 0.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: speaker_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = 0.0
-						1/index = 1
-						1/db_attenuation = 0.0
-						2/index = 2
-						2/db_attenuation = 0.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = 0.0
-						1/index = 1
-						1/db_attenuation = 0.0
-						2/index = 2
-						2/db_attenuation = 0.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: extmedia_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = 0.0
-						1/index = 1
-						1/db_attenuation = 0.0
-						2/index = 2
-						2/db_attenuation = 0.0
-						3/index = 100
-						3/db_attenuation = 0.0
-
-				component: patch/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = 0.0
-						1/index = 1
-						1/db_attenuation = 0.0
-						2/index = 2
-						2/db_attenuation = 0.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: speaker_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = 0.0
-						1/index = 1
-						1/db_attenuation = 0.0
-						2/index = 2
-						2/db_attenuation = 0.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = 0.0
-						1/index = 1
-						1/db_attenuation = 0.0
-						2/index = 2
-						2/db_attenuation = 0.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: extmedia_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = 0.0
-						1/index = 1
-						1/db_attenuation = 0.0
-						2/index = 2
-						2/db_attenuation = 0.0
-						3/index = 100
-						3/db_attenuation = 0.0
-
-	domain: Dtmf
-		conf: InCall
-			ANY
-				TelephonyMode Is InCall
-				TelephonyMode Is InCommunication
-
-			component: /Policy/policy/streams
-				component: dtmf/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -30.0
-						1/index = 33
-						1/db_attenuation = -26.0
-						2/index = 66
-						2/db_attenuation = -22.0
-						3/index = 100
-						3/db_attenuation = -18.0
-					component: speaker_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: earpiece_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -27.0
-						3/index = 100
-						3/db_attenuation = -10.0
-
-		conf: OutOfCall
-			component: /Policy/policy/streams
-				component: dtmf/volume_profiles
-					component: headset_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: speaker_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -16.0
-						2/index = 66
-						2/db_attenuation = -8.0
-						3/index = 100
-						3/db_attenuation = 0.0
-					component: earpiece_device_category/curve_points
-						0/index = 0
-						0/db_attenuation = -24.0
-						1/index = 33
-						1/db_attenuation = -18.0
-						2/index = 66
-						2/db_attenuation = -12.0
-						3/index = 100
-						3/db_attenuation = -6.0
-					component: extmedia_device_category/curve_points
-						0/index = 1
-						0/db_attenuation = -58.0
-						1/index = 33
-						1/db_attenuation = -40.0
-						2/index = 66
-						2/db_attenuation = -17.0
-						3/index = 100
-						3/db_attenuation = 0.0
-
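
Each volume profile deleted above is a four-point curve mapping a volume index (0 to 100) to a dB attenuation per device category; intermediate indices are conventionally resolved by linear interpolation in dB between the two neighboring points. A sketch under that assumption, using the music/speaker curve from the file:

    #include <array>
    #include <cstdio>

    // Illustrative sketch (not the engine's actual code): each profile above
    // is four (index, dB attenuation) points; intermediate indices are assumed
    // to be linearly interpolated in dB between the neighboring points.
    struct CurvePoint { int index; float dbAttenuation; };

    float volIndexToDb(const std::array<CurvePoint, 4>& curve, int volIndex) {
        if (volIndex <= curve.front().index) return curve.front().dbAttenuation;
        for (std::size_t i = 1; i < curve.size(); ++i) {
            if (volIndex <= curve[i].index) {
                const CurvePoint& lo = curve[i - 1];
                const CurvePoint& hi = curve[i];
                float t = float(volIndex - lo.index) / float(hi.index - lo.index);
                return lo.dbAttenuation + t * (hi.dbAttenuation - lo.dbAttenuation);
            }
        }
        return curve.back().dbAttenuation;
    }

    int main() {
        // music/volume_profiles, speaker_device_category from the deleted file
        std::array<CurvePoint, 4> musicSpeaker = {{
            {1, -56.0f}, {33, -34.0f}, {66, -11.0f}, {100, 0.0f}}};
        // index 50 falls between (33, -34.0) and (66, -11.0): about -22.2 dB
        std::printf("%.1f dB\n", volIndexToDb(musicSpeaker, 50));
    }
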
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-Volume.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-Volume.xml
deleted file mode 100755
index cf39cc2..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-Volume.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<ComponentTypeSet xmlns:xi="http://www.w3.org/2001/XInclude"
-                  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-                  xsi:noNamespaceSchemaLocation="Schemas/ComponentTypeSet.xsd">
-  <ComponentType Name="VolumeCurvePoints">
-    <ParameterBlock Name="curve_points" ArrayLength="4" Mapping="VolumeProfile:'%1'"
-        Description="4 points to define the volume attenuation curve, each
-                     characterized by the volume index (from 0 to 100) at which
-                     they apply, and the attenuation in dB at that index.
-                     We use 100 steps to avoid rounding errors when computing
-                     the volume">
-        <IntegerParameter Name="index" Size="32"/>
-        <FixedPointParameter Name="db_attenuation" Size="16" Integral="7" Fractional="8"/>
-     </ParameterBlock>
-    </ComponentType>
-
-    <ComponentType Name="VolumeCurvesCategories">
-        <Component Name="headset_device_category" Type="VolumeCurvePoints" Mapping="Category:0"/>
-        <Component Name="speaker_device_category" Type="VolumeCurvePoints" Mapping="Category:1"/>
-        <Component Name="earpiece_device_category" Type="VolumeCurvePoints" Mapping="Category:2"/>
-        <Component Name="extmedia_device_category" Type="VolumeCurvePoints" Mapping="Category:3"/>
-    </ComponentType>
-
-</ComponentTypeSet>
-
-
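
The FixedPointParameter declared in the deleted structure file stores each dB attenuation as a 16-bit fixed-point value with 7 integral and 8 fractional bits (Q7.8), i.e. in steps of 1/256 dB. A small sketch of that encoding — encodeDbQ7_8 and decodeDbQ7_8 are illustrative names:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Sketch of the Q7.8 layout declared above (Size="16" Integral="7"
    // Fractional="8"): one sign bit, 7 integer bits, 8 fractional bits,
    // so values are signed 16-bit counts of 1/256 dB.
    int16_t encodeDbQ7_8(float db) {
        return static_cast<int16_t>(std::lround(db * 256.0f));
    }

    float decodeDbQ7_8(int16_t raw) {
        return static_cast<float>(raw) / 256.0f;
    }

    int main() {
        int16_t raw = encodeDbQ7_8(-6.0f);   // -6.0 dB -> -1536
        assert(raw == -1536);
        assert(decodeDbQ7_8(raw) == -6.0f);  // exactly representable
    }
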
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
new file mode 100644
index 0000000..baaefd2
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
@@ -0,0 +1,162 @@
+################################################################################################
+#
+# @NOTE:
+# Example configurable Audio Policy Engine settings for a generic device build.
+#
+# Each vendor shall provide its own configuration within the corresponding device folder.
+#
+################################################################################################
+
+ifeq (1, 0)
+
+LOCAL_PATH := $(call my-dir)
+
+PFW_CORE := external/parameter-framework
+BUILD_PFW_SETTINGS := $(PFW_CORE)/support/android/build_pfw_settings.mk
+PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+##################################################################
+# CONFIGURATION FILES
+##################################################################
+######### Policy PFW top level file #########
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := ParameterFrameworkConfigurationPolicy.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework
+LOCAL_SRC_FILES := $(LOCAL_MODULE).in
+
+AUDIO_PATTERN = @TUNING_ALLOWED@
+ifeq ($(TARGET_BUILD_VARIANT),user)
+AUDIO_VALUE = false
+else
+AUDIO_VALUE = true
+endif
+
+LOCAL_POST_INSTALL_CMD := $(hide) sed -i -e 's|$(AUDIO_PATTERN)|$(AUDIO_VALUE)|g' $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+
+include $(BUILD_PREBUILT)
+
+
+########## Policy PFW Structures #########
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicyClass.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_REQUIRED_MODULES := \
+    PolicySubsystem-CommonTypes.xml \
+    PolicySubsystem-Volume.xml \
+    libpolicy-subsystem \
+
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem-CommonTypes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+######### Policy PFW Settings #########
+include $(CLEAR_VARS)
+LOCAL_MODULE := parameter-framework.policy
+LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+        PolicyClass.xml \
+        PolicySubsystem.xml \
+        ParameterFrameworkConfigurationPolicy.xml
+
+ifeq ($(pfw_rebuild_settings),true)
+PFW_TOPLEVEL_FILE := $(TARGET_OUT_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
+PFW_CRITERIA_FILE := $(LOCAL_PATH)/policy_criteria.txt
+PFW_EDD_FILES := \
+        $(LOCAL_PATH)/Settings/device_for_strategy_media.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_phone.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_sonification.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_sonification_respectful.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_dtmf.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_enforced_audible.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_transmitted_through_speaker.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_accessibility.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_rerouting.pfw \
+        $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
+        $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
+        $(LOCAL_PATH)/Settings/device_for_input_source.pfw \
+        $(LOCAL_PATH)/Settings/volumes.pfw
+
+include $(BUILD_PFW_SETTINGS)
+else
+# Use the existing file
+LOCAL_SRC_FILES := Settings/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+endif # pfw_rebuild_settings
+
+######### Policy PFW Settings - No Output #########
+include $(CLEAR_VARS)
+LOCAL_MODULE := parameter-framework.policy.no-output
+LOCAL_MODULE_STEM := PolicyConfigurableDomains-NoOutputDevice.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+        PolicyClass.xml \
+        PolicySubsystem.xml \
+        ParameterFrameworkConfigurationPolicy.xml
+
+PFW_TOPLEVEL_FILE := $(TARGET_OUT_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
+PFW_CRITERIA_FILE := $(LOCAL_PATH)/policy_criteria.txt
+PFW_EDD_FILES := \
+        $(LOCAL_PATH)/SettingsNoOutput/device_for_strategies.pfw \
+        $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
+        $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
+        $(LOCAL_PATH)/Settings/device_for_input_source.pfw \
+        $(LOCAL_PATH)/Settings/volumes.pfw
+
+include $(BUILD_PFW_SETTINGS)
+
+######### Policy PFW Settings - No Input #########
+include $(CLEAR_VARS)
+LOCAL_MODULE := parameter-framework.policy.no-input
+LOCAL_MODULE_STEM := PolicyConfigurableDomains-NoInputDevice.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+        PolicyClass.xml \
+        PolicySubsystem.xml \
+        ParameterFrameworkConfigurationPolicy.xml
+
+PFW_TOPLEVEL_FILE := $(TARGET_OUT_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
+PFW_CRITERIA_FILE := $(LOCAL_PATH)/policy_criteria.txt
+PFW_EDD_FILES := \
+        $(LOCAL_PATH)/Settings/device_for_strategy_media.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_phone.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_sonification.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_sonification_respectful.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_dtmf.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_enforced_audible.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_transmitted_through_speaker.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_accessibility.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_rerouting.pfw \
+        $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
+        $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
+        $(LOCAL_PATH)/SettingsNoInput/device_for_input_source.pfw \
+        $(LOCAL_PATH)/Settings/volumes.pfw
+
+include $(BUILD_PFW_SETTINGS)
+
+endif # ifeq (1, 0)
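
The .xml.in rename below pairs with the LOCAL_POST_INSTALL_CMD above: the installed ParameterFrameworkConfigurationPolicy.xml is produced from a template whose @TUNING_ALLOWED@ placeholder is sed-rewritten to false on user builds and true otherwise, so remote tuning on ServerPort 5019 stays off in production. A C++ illustration of that substitution — expandTuningAllowed is a hypothetical name, not part of the build system:

    #include <iostream>
    #include <string>

    // Illustration of what the LOCAL_POST_INSTALL_CMD sed above does to the
    // installed template: every occurrence of @TUNING_ALLOWED@ is rewritten
    // to "true" or "false" depending on the build variant.
    std::string expandTuningAllowed(std::string text, bool isUserBuild) {
        const std::string pattern = "@TUNING_ALLOWED@";
        const std::string value = isUserBuild ? "false" : "true";
        for (std::string::size_type pos = text.find(pattern);
             pos != std::string::npos;
             pos = text.find(pattern, pos + value.size())) {
            text.replace(pos, pattern.size(), value);
        }
        return text;
    }

    int main() {
        std::string line =
            "    SystemClassName=\"Policy\" ServerPort=\"5019\" "
            "TuningAllowed=\"@TUNING_ALLOWED@\">";
        std::cout << expandTuningAllowed(line, /*isUserBuild=*/true) << "\n";
    }
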
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in
similarity index 77%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in
index 6905201..f5615cd 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in
@@ -1,7 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <ParameterFrameworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:noNamespaceSchemaLocation="Schemas/ParameterFrameworkConfiguration.xsd"
-    SystemClassName="Policy" ServerPort="5019" TuningAllowed="true">
+    SystemClassName="Policy" ServerPort="5019" TuningAllowed="@TUNING_ALLOWED@">
 
     <SubsystemPlugins>
         <Location Folder="">
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/README.md b/services/audiopolicy/engineconfigurable/parameter-framework/examples/README.md
similarity index 100%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/README.md
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/README.md
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
similarity index 79%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
index 8cb0723..aa2af0f 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <!-- #### DO NOT EDIT THIS FILE #### -->
-<ConfigurableDomains xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="Schemas/ConfigurableDomains.xsd" SystemClassName="Policy">
+<ConfigurableDomains SystemClassName="Policy">
   <ConfigurableDomain Name="DeviceForStrategy.Media.UnreachableDevices" SequenceAware="false">
     <Configurations>
       <Configuration Name="Calibration">
@@ -15,6 +15,9 @@
       <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_sco_headset"/>
       <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_sco_carkit"/>
       <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/telephony_tx"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -39,6 +42,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/telephony_tx">
           <BitParameter Name="telephony_tx">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -52,18 +64,21 @@
       <Configuration Name="BluetoothA2dp">
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceBtSco"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="BluetoothA2dpHeadphone">
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceBtSco"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="BluetoothA2dpSpeaker">
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceBtSco"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
         </CompoundRule>
       </Configuration>
@@ -119,6 +134,7 @@
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
           <SelectionCriterionRule SelectionCriterion="ForceUseForHdmiSystemAudio" MatchesWhen="IsNot" Value="ForceHdmiSystemEnforced"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceBtSco"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="Default">
@@ -852,6 +868,9 @@
       <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/fm"/>
       <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker_safe"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -873,6 +892,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker_safe">
           <BitParameter Name="speaker_safe">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -902,10 +930,7 @@
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
-          <CompoundRule Type="Any">
-            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
-            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
-          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="BluetoothA2dpHeadphones">
@@ -914,10 +939,7 @@
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
           <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
-          <CompoundRule Type="Any">
-            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
-            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
-          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="BluetoothA2dpSpeaker">
@@ -941,6 +963,12 @@
           <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
         </CompoundRule>
       </Configuration>
+      <Configuration Name="Line">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
       <Configuration Name="UsbDevice">
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
@@ -990,12 +1018,6 @@
           <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
         </CompoundRule>
       </Configuration>
-      <Configuration Name="Line">
-        <CompoundRule Type="All">
-          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
-        </CompoundRule>
-      </Configuration>
       <Configuration Name="Speaker">
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
@@ -1314,10 +1336,10 @@
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
-          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
-          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+          <BitParameter Name="bluetooth_a2dp_speaker">1</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
           <BitParameter Name="hdmi">0</BitParameter>
@@ -1450,6 +1472,59 @@
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
+      <Configuration Name="Line">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
       <Configuration Name="UsbDevice">
         <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
           <BitParameter Name="earpiece">0</BitParameter>
@@ -1768,59 +1843,6 @@
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="Line">
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
-          <BitParameter Name="earpiece">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
-          <BitParameter Name="wired_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
-          <BitParameter Name="wired_headphone">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
-          <BitParameter Name="bluetooth_sco">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
-          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
-          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
-          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
-          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
-          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
-          <BitParameter Name="hdmi">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
-          <BitParameter Name="angl_dock_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
-          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
-          <BitParameter Name="usb_accessory">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
-          <BitParameter Name="usb_device">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
-          <BitParameter Name="telephony_tx">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
-          <BitParameter Name="line">1</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
-          <BitParameter Name="speaker">0</BitParameter>
-        </ConfigurableElement>
-      </Configuration>
       <Configuration Name="Speaker">
         <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
           <BitParameter Name="earpiece">0</BitParameter>
@@ -1924,7 +1946,7 @@
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
-          <BitParameter Name="speaker">0</BitParameter>
+          <BitParameter Name="speaker">1</BitParameter>
         </ConfigurableElement>
       </Configuration>
     </Settings>
@@ -1943,6 +1965,9 @@
       <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/speaker_safe"/>
       <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/aux_line"/>
       <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -1967,6 +1992,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/hdmi">
           <BitParameter Name="hdmi">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -2095,7 +2129,8 @@
                 <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
                 <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
               </CompoundRule>
-              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="WiredHeadset"/>
             </CompoundRule>
             <CompoundRule Type="All">
               <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
@@ -2921,6 +2956,9 @@
       <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/fm"/>
       <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/telephony_tx"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -2942,6 +2980,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/telephony_tx">
           <BitParameter Name="telephony_tx">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -3084,12 +3131,25 @@
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
         </CompoundRule>
       </Configuration>
-      <Configuration Name="LineWhenFollowMediaStrategy">
+      <Configuration Name="Line">
         <CompoundRule Type="All">
-          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
-          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
-          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="WiredHeadset"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="WiredHeadset">
@@ -3174,16 +3234,6 @@
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Earpiece"/>
         </CompoundRule>
       </Configuration>
-      <Configuration Name="Line">
-        <CompoundRule Type="All">
-          <CompoundRule Type="Any">
-            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
-            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
-          </CompoundRule>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
-          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
-        </CompoundRule>
-      </Configuration>
     </Configurations>
     <ConfigurableElements>
       <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece"/>
@@ -3532,7 +3582,7 @@
           <BitParameter Name="hdmi">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="LineWhenFollowMediaStrategy">
+      <Configuration Name="Line">
         <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
           <BitParameter Name="earpiece">0</BitParameter>
         </ConfigurableElement>
@@ -3908,53 +3958,6 @@
           <BitParameter Name="hdmi">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="Line">
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
-          <BitParameter Name="earpiece">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
-          <BitParameter Name="bluetooth_sco">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
-          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
-          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
-          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
-          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
-          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
-          <BitParameter Name="wired_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
-          <BitParameter Name="wired_headphone">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
-          <BitParameter Name="line">1</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
-          <BitParameter Name="angl_dock_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
-          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
-          <BitParameter Name="usb_accessory">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
-          <BitParameter Name="usb_device">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
-          <BitParameter Name="hdmi">0</BitParameter>
-        </ConfigurableElement>
-      </Configuration>
     </Settings>
   </ConfigurableDomain>
   <ConfigurableDomain Name="DeviceForStrategy.Dtmf.UnreachableDevices" SequenceAware="false">
@@ -3967,6 +3970,9 @@
       <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/fm"/>
       <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker_safe"/>
       <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -3979,6 +3985,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_carkit">
           <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -4062,10 +4077,22 @@
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
         </CompoundRule>
       </Configuration>
-      <Configuration Name="LineWhenFollowingMedia">
+      <Configuration Name="Line">
         <CompoundRule Type="All">
-          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
-          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="WiredHeadset"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+            </CompoundRule>
+          </CompoundRule>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
         </CompoundRule>
       </Configuration>
@@ -4146,16 +4173,6 @@
           <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
         </CompoundRule>
       </Configuration>
-      <Configuration Name="LineWhenFallThroughPhone">
-        <CompoundRule Type="All">
-          <CompoundRule Type="Any">
-            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
-            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
-          </CompoundRule>
-          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
-          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
-        </CompoundRule>
-      </Configuration>
       <Configuration Name="Speaker">
         <CompoundRule Type="All">
           <CompoundRule Type="Any">
@@ -4623,7 +4640,7 @@
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="LineWhenFollowingMedia">
+      <Configuration Name="Line">
         <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
@@ -5047,59 +5064,6 @@
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="LineWhenFallThroughPhone">
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
-          <BitParameter Name="remote_submix">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
-          <BitParameter Name="earpiece">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
-          <BitParameter Name="wired_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
-          <BitParameter Name="wired_headphone">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
-          <BitParameter Name="bluetooth_sco">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
-          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
-          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
-          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
-          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
-          <BitParameter Name="hdmi">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
-          <BitParameter Name="angl_dock_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
-          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
-          <BitParameter Name="usb_accessory">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
-          <BitParameter Name="usb_device">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
-          <BitParameter Name="telephony_tx">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
-          <BitParameter Name="line">1</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
-          <BitParameter Name="speaker">0</BitParameter>
-        </ConfigurableElement>
-      </Configuration>
       <Configuration Name="Speaker">
         <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
@@ -5222,16 +5186,16 @@
       </Configuration>
     </Configurations>
     <ConfigurableElements>
-      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Selected">
-        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc">
           <BitParameter Name="hdmi_arc">1</BitParameter>
         </ConfigurableElement>
       </Configuration>
       <Configuration Name="NotSelected">
-        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc">
           <BitParameter Name="hdmi_arc">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
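The hunk above, and the two that follow for spdif and aux_line, correct what appears to be a copy-paste error: these Dtmf-group domains were addressing the media strategy's parameters instead of dtmf's own. Since a configurable element may belong to at most one domain, the old paths would have put the media bits under two domains while leaving the dtmf bits undriven. The repaired state follows the usual Selected/NotSelected pattern, reconstructed here from the hunk:

<Configuration Name="Selected">
  <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc">
    <BitParameter Name="hdmi_arc">1</BitParameter>
  </ConfigurableElement>
</Configuration>
<Configuration Name="NotSelected">
  <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc">
    <BitParameter Name="hdmi_arc">0</BitParameter>
  </ConfigurableElement>
</Configuration>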
@@ -5251,16 +5215,16 @@
       </Configuration>
     </Configurations>
     <ConfigurableElements>
-      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Selected">
-        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif">
           <BitParameter Name="spdif">1</BitParameter>
         </ConfigurableElement>
       </Configuration>
       <Configuration Name="NotSelected">
-        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif">
           <BitParameter Name="spdif">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
@@ -5280,16 +5244,16 @@
       </Configuration>
     </Configurations>
     <ConfigurableElements>
-      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Selected">
-        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line">
           <BitParameter Name="aux_line">1</BitParameter>
         </ConfigurableElement>
       </Configuration>
       <Configuration Name="NotSelected">
-        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line">
           <BitParameter Name="aux_line">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
@@ -5302,17 +5266,17 @@
       </Configuration>
     </Configurations>
     <ConfigurableElements>
-      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix"/>
       <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi_arc"/>
       <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/aux_line"/>
       <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
-          <BitParameter Name="remote_submix">0</BitParameter>
-        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi_arc">
           <BitParameter Name="hdmi_arc">0</BitParameter>
         </ConfigurableElement>
@@ -5325,6 +5289,18 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
           <BitParameter Name="speaker_safe">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
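In the hunk above, fm joins the enforced_audible unreachable set while remote_submix leaves it (the device-selection domain below drives remote_submix itself). The long runs of deletions in the following hunks are the other half of the same cleanup: fm and speaker_safe were constant 0 in every configuration of the enforced_audible device domain, so each is now stated once, in the unconditional Calibration, instead of being repeated per device. For example, this block, formerly duplicated in every configuration, now appears only in the Calibration above:

<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
  <BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>

Net routing behavior should be unchanged; each bit is simply owned by the one domain that actually varies it.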
@@ -5456,6 +5432,9 @@
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
         </CompoundRule>
       </Configuration>
+      <Configuration Name="NoDevice">
+        <CompoundRule Type="All"/>
+      </Configuration>
     </Configurations>
     <ConfigurableElements>
       <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix"/>
@@ -5475,8 +5454,6 @@
       <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device"/>
       <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx"/>
       <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line"/>
-      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm"/>
-      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="RemoteSubmix">
@@ -5531,12 +5508,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="BluetoothA2dp">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5590,12 +5561,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="BluetoothA2dpHeadphones">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5649,12 +5614,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="BluetoothA2dpSpeaker">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5708,12 +5667,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="WiredHeadphone">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5767,12 +5720,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="Line">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5826,12 +5773,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">1</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="WiredHeadset">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5885,12 +5826,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="UsbAccessory">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5944,12 +5879,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="UsbDevice">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -6003,12 +5932,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="DgtlDockHeadset">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -6062,12 +5985,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="Hdmi">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -6121,12 +6038,6 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
-        </ConfigurableElement>
       </Configuration>
       <Configuration Name="AnlgDockHeadset">
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -6180,11 +6091,58 @@
         <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
-          <BitParameter Name="fm">0</BitParameter>
+      </Configuration>
+      <Configuration Name="NoDevice">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
-          <BitParameter Name="speaker_safe">0</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
     </Settings>
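The NoDevice configuration introduced above closes the domain with an empty rule. An All rule with no children is vacuously true, and the parameter framework applies the first configuration, in declaration order, whose rule matches; a NoDevice entry declared last therefore acts as the fallback, driving every bit in the mask to 0 when no device condition above holds, rather than leaving the mask at its previous value. In isolation:

<Configuration Name="NoDevice">
  <!-- empty AND: always true, so this matches whenever nothing above did -->
  <CompoundRule Type="All"/>
</Configuration>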
@@ -6218,6 +6176,9 @@
       <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/usb_device"/>
       <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/telephony_tx"/>
       <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -6287,6 +6248,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/line">
           <BitParameter Name="line">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -6329,11 +6299,10 @@
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/aux_line"/>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/fm"/>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker_safe"/>
-      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece"/>
-      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco"/>
-      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset"/>
-      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit"/>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/telephony_tx"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -6352,39 +6321,42 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker_safe">
           <BitParameter Name="speaker_safe">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
-          <BitParameter Name="earpiece">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
-          <BitParameter Name="bluetooth_sco">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
-          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
-          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
-        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/telephony_tx">
           <BitParameter Name="telephony_tx">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
-  <ConfigurableDomain Name="DeviceForStrategy.Accessibility.Device2" SequenceAware="false">
+  <ConfigurableDomain Name="DeviceForStrategy.Accessibility.Device" SequenceAware="false">
     <Configurations>
       <Configuration Name="RemoteSubmix">
         <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="RemoteSubmix"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="BluetoothA2dp">
         <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="BluetoothA2dpHeadphone">
         <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
         </CompoundRule>
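The hunks in this region rework the accessibility device-selection domain (renamed from DeviceForStrategy.Accessibility.Device2 to DeviceForStrategy.Accessibility.Device) so that every configuration branches on TelephonyMode: during a call or VoIP session, accessibility output follows communication routing (earpiece, the BT SCO variants, ForceUseForCommunication), and only outside of one does it use media-style routing (remote submix, A2DP, docks, HDMI). The recurring shape is an Any over an in-call branch and an idle branch; a condensed sketch (the real rules also test InCommunication in the first branch and device availability):

<CompoundRule Type="Any">
  <!-- in-call branch: communication not forced to speaker -->
  <CompoundRule Type="All">
    <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
    <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
  </CompoundRule>
  <!-- idle branch: no call, no VoIP -->
  <CompoundRule Type="All">
    <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
    <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
  </CompoundRule>
</CompoundRule>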
@@ -6392,56 +6364,159 @@
       <Configuration Name="BluetoothA2dpSpeaker">
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
         </CompoundRule>
       </Configuration>
-      <Configuration Name="ForceSpeaker">
+      <Configuration Name="ForceSpeakerWhenNotInCall">
         <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceSpeaker"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
         </CompoundRule>
       </Configuration>
+      <Configuration Name="BluetoothScoCarkit">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothScoCarkit"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothScoHeadset">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothScoHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothSco">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothSco"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+        </CompoundRule>
+      </Configuration>
       <Configuration Name="WiredHeadphone">
         <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+            </CompoundRule>
+          </CompoundRule>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="Line">
         <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="WiredHeadset"/>
+            </CompoundRule>
+          </CompoundRule>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="WiredHeadset">
         <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+            </CompoundRule>
+          </CompoundRule>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
         </CompoundRule>
       </Configuration>
-      <Configuration Name="UsbAccessory">
-        <CompoundRule Type="All">
-          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
-        </CompoundRule>
-      </Configuration>
       <Configuration Name="UsbDevice">
         <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+            </CompoundRule>
+          </CompoundRule>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
         </CompoundRule>
       </Configuration>
+      <Configuration Name="UsbAccessory">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
+        </CompoundRule>
+      </Configuration>
       <Configuration Name="DgtlDockHeadset">
         <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="DgtlDockHeadset"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="AuxDigital">
         <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Hdmi"/>
         </CompoundRule>
       </Configuration>
       <Configuration Name="AnlgDockHeadset">
         <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
           <SelectionCriterionRule SelectionCriterion="ForceUseForDock" MatchesWhen="Is" Value="ForceAnalogDock"/>
         </CompoundRule>
       </Configuration>
+      <Configuration Name="Earpiece">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Earpiece"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
       <Configuration Name="Speaker">
         <CompoundRule Type="All">
           <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
@@ -6453,9 +6528,13 @@
     </Configurations>
     <ConfigurableElements>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece"/>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco"/>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker"/>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset"/>
       <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone"/>
@@ -6471,6 +6550,9 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">1</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6480,6 +6562,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6512,6 +6603,9 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">1</BitParameter>
         </ConfigurableElement>
@@ -6521,6 +6615,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6553,6 +6656,9 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6562,6 +6668,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6594,6 +6709,9 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6603,6 +6721,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">1</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6631,10 +6758,13 @@
           <BitParameter Name="hdmi">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="ForceSpeaker">
+      <Configuration Name="ForceSpeakerWhenNotInCall">
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6644,6 +6774,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">1</BitParameter>
         </ConfigurableElement>
@@ -6672,10 +6811,13 @@
           <BitParameter Name="hdmi">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="WiredHeadphone">
+      <Configuration Name="BluetoothScoCarkit">
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6685,6 +6827,174 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothScoHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothSco">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6717,6 +7027,9 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6726,6 +7039,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6758,6 +7080,9 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6767,6 +7092,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6795,51 +7129,13 @@
           <BitParameter Name="hdmi">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="UsbAccessory">
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
-          <BitParameter Name="remote_submix">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
-          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
-          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
-          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
-          <BitParameter Name="speaker">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
-          <BitParameter Name="wired_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
-          <BitParameter Name="wired_headphone">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
-          <BitParameter Name="line">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
-          <BitParameter Name="angl_dock_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
-          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
-          <BitParameter Name="usb_accessory">1</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
-          <BitParameter Name="usb_device">0</BitParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
-          <BitParameter Name="hdmi">0</BitParameter>
-        </ConfigurableElement>
-      </Configuration>
       <Configuration Name="UsbDevice">
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6849,6 +7145,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6877,10 +7182,13 @@
           <BitParameter Name="hdmi">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="DgtlDockHeadset">
+      <Configuration Name="UsbAccessory">
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6890,6 +7198,68 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6922,6 +7292,9 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6931,6 +7304,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -6963,6 +7345,9 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -6972,6 +7357,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -7000,10 +7394,13 @@
           <BitParameter Name="hdmi">0</BitParameter>
         </ConfigurableElement>
       </Configuration>
-      <Configuration Name="Speaker">
+      <Configuration Name="Earpiece">
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">1</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -7013,6 +7410,68 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">1</BitParameter>
         </ConfigurableElement>
@@ -7045,6 +7504,9 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
           <BitParameter Name="remote_submix">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
@@ -7054,6 +7516,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
           <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
           <BitParameter Name="speaker">0</BitParameter>
         </ConfigurableElement>
@@ -7101,6 +7572,9 @@
       <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_sco_headset"/>
       <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_sco_carkit"/>
       <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/telephony_tx"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/stub"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
@@ -7134,6 +7608,15 @@
         <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/telephony_tx">
           <BitParameter Name="telephony_tx">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -8026,7 +8509,6 @@
       </Configuration>
     </Configurations>
     <ConfigurableElements>
-      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/in"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/hdmi"/>
@@ -8041,7 +8523,9 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/line"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/hdmi"/>
@@ -8056,7 +8540,9 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/line"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/builtin_mic"/>
@@ -8075,7 +8561,9 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/builtin_mic"/>
@@ -8094,7 +8582,9 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/builtin_mic"/>
@@ -8113,7 +8603,9 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/bluetooth_sco_headset"/>
@@ -8131,7 +8623,9 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/hdmi"/>
@@ -8147,7 +8641,9 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/hdmi"/>
@@ -8162,7 +8658,9 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/builtin_mic"/>
@@ -8181,7 +8679,9 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/hdmi"/>
@@ -8197,7 +8697,27 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/loopback"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/stub"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bus"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/stub"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/communication"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/ambient"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bluetooth_sco_headset"/>
@@ -8211,18 +8731,16 @@
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/dgtl_dock_headset"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_accessory"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_device"/>
-      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/fm_tuner"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/tv_tuner"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/line"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/spdif"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bluetooth_a2dp"/>
       <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/ip"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
-        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
-        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
         </ConfigurableElement>
@@ -8265,8 +8783,14 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8310,8 +8834,14 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8367,8 +8897,14 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8424,8 +8960,14 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8481,8 +9023,14 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8535,8 +9083,14 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8583,8 +9137,14 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8628,8 +9188,14 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8685,8 +9251,14 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8733,8 +9305,68 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/in">
-          <BitParameter Name="in">1</BitParameter>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/stub">
+          <BitParameter Name="stub">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/communication">
           <BitParameter Name="communication">0</BitParameter>
@@ -8775,9 +9407,6 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_device">
           <BitParameter Name="usb_device">0</BitParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/fm_tuner">
-          <BitParameter Name="fm_tuner">0</BitParameter>
-        </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/tv_tuner">
           <BitParameter Name="tv_tuner">0</BitParameter>
         </ConfigurableElement>
@@ -8793,6 +9422,12 @@
         <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/loopback">
           <BitParameter Name="loopback">0</BitParameter>
         </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/ip">
+          <BitParameter Name="ip">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus">
+          <BitParameter Name="bus">0</BitParameter>
+        </ConfigurableElement>
       </Configuration>
     </Settings>
   </ConfigurableDomain>
@@ -8893,7 +9528,7 @@
           <BitParameter Name="bluetooth_a2dp">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/wired_headset">
-          <BitParameter Name="wired_headset">1</BitParameter>
+          <BitParameter Name="wired_headset">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_device">
           <BitParameter Name="usb_device">0</BitParameter>
@@ -9439,7 +10074,7 @@
           <BitParameter Name="usb_device">0</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/builtin_mic">
-          <BitParameter Name="builtin_mic">0</BitParameter>
+          <BitParameter Name="builtin_mic">1</BitParameter>
         </ConfigurableElement>
         <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/back_mic">
           <BitParameter Name="back_mic">0</BitParameter>
@@ -9508,1544 +10143,56 @@
       </Configuration>
     </Configurations>
     <ConfigurableElements>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/applicable_volume_profile/volume_profile"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/applicable_volume_profile/volume_profile"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="Calibration">
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">voice_call</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/system/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">system</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">ring</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/music/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">music</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">alarm</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">notification</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">bluetooth_sco</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">enforced_audible</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">tts</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">accessibility</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">rerouting</EnumParameter>
         </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-16.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-8.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-30.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-26.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-22.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-21.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-35.69921875</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-26.10156250</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-13.19921875</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-56.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-34.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-11.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-35.69921875</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-26.10156250</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-13.19921875</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-35.69921875</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-26.10156250</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-13.19921875</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-16.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-8.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-30.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-26.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-22.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-68.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-34.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-56.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-34.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-11.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">2</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">patch</EnumParameter>
         </ConfigurableElement>
       </Configuration>
     </Settings>
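The hunk above is part of a larger cleanup: the per-device-category curve_points that were inlined under each stream are replaced by a single applicable_volume_profile enum, so streams now name a shared volume profile instead of carrying their own curves. For reference, the removed XML encoded piecewise-linear curves mapping a volume index (0..100) to a dB attenuation; a minimal sketch of that interpolation, using hypothetical names rather than the engine's actual code:

    // Minimal sketch (hypothetical names): piecewise-linear interpolation
    // over the (index, db_attenuation) points the removed XML inlined.
    #include <vector>

    struct CurvePoint {
        int   index;          // normalized volume index, 0..100
        float dbAttenuation;  // attenuation in dB at that index
    };

    float volIndexToDb(const std::vector<CurvePoint>& curve, int idx)
    {
        if (curve.empty()) return 0.0f;
        if (idx <= curve.front().index) return curve.front().dbAttenuation;
        if (idx >= curve.back().index)  return curve.back().dbAttenuation;
        for (size_t i = 1; i < curve.size(); ++i) {
            if (idx <= curve[i].index) {
                const CurvePoint& lo = curve[i - 1];
                const CurvePoint& hi = curve[i];
                float t = float(idx - lo.index) / float(hi.index - lo.index);
                return lo.dbAttenuation + t * (hi.dbAttenuation - lo.dbAttenuation);
            }
        }
        return curve.back().dbAttenuation;
    }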
@@ -11063,234 +10210,17 @@
       </Configuration>
     </Configurations>
     <ConfigurableElements>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/index"/>
-      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/applicable_volume_profile/volume_profile"/>
     </ConfigurableElements>
     <Settings>
       <Configuration Name="InCall">
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-30.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-26.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-22.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">voice_call</EnumParameter>
         </ConfigurableElement>
       </Configuration>
       <Configuration Name="OutOfCall">
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-16.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-8.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">0</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/index">
-          <IntegerParameter Name="index">1</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/index">
-          <IntegerParameter Name="index">33</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/index">
-          <IntegerParameter Name="index">66</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/index">
-          <IntegerParameter Name="index">100</IntegerParameter>
-        </ConfigurableElement>
-        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
-          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/applicable_volume_profile/volume_profile">
+          <EnumParameter Name="volume_profile">dtmf</EnumParameter>
         </ConfigurableElement>
       </Configuration>
     </Settings>
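With the curves factored out, the InCall/OutOfCall configurations above reduce to picking a named profile for the dtmf stream: voice_call while a call is active, dtmf otherwise. The equivalent selection logic, sketched with hypothetical names:

    // Sketch (hypothetical names) of what the InCall/OutOfCall
    // configurations above express declaratively.
    enum class VolumeProfile { voice_call, dtmf, patch };

    VolumeProfile dtmfApplicableProfile(bool inCallOrCommunication)
    {
        // the dtmf stream borrows the voice_call profile during a call
        return inCallOrCommunication ? VolumeProfile::voice_call
                                     : VolumeProfile::dtmf;
    }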
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_input_source.pfw
similarity index 88%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_input_source.pfw
index d4bc370..a990879 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_input_source.pfw
@@ -1,12 +1,7 @@
 supDomain: DeviceForInputSource
 	domain: Calibration
 		conf: Calibration
-			#
-			# Note that ALL input devices must have the sign bit set to 1.
-			# As the devices is a mask, use the "in" bit as a direction indicator.
-			#
 			component: /Policy/policy/input_sources/default/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				hdmi = 0
@@ -21,8 +16,10 @@
 				line = 0
 				spdif = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/mic/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				hdmi = 0
@@ -37,8 +34,10 @@
 				line = 0
 				spdif = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/voice_downlink/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				builtin_mic = 0
@@ -57,8 +56,10 @@
 				spdif = 0
 				bluetooth_a2dp = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/voice_call/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				builtin_mic = 0
@@ -77,8 +78,10 @@
 				spdif = 0
 				bluetooth_a2dp = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/voice_uplink/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				builtin_mic = 0
@@ -97,8 +100,10 @@
 				spdif = 0
 				bluetooth_a2dp = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				bluetooth_sco_headset = 0
@@ -116,8 +121,10 @@
 				spdif = 0
 				bluetooth_a2dp = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/voice_recognition/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				hdmi = 0
@@ -133,8 +140,10 @@
 				spdif = 0
 				bluetooth_a2dp = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				hdmi = 0
@@ -149,8 +158,10 @@
 				spdif = 0
 				bluetooth_a2dp = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/remote_submix/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				builtin_mic = 0
@@ -169,8 +180,10 @@
 				spdif = 0
 				bluetooth_a2dp = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/hotword/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				hdmi = 0
@@ -186,8 +199,29 @@
 				spdif = 0
 				bluetooth_a2dp = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
+			component: /Policy/policy/input_sources/unprocessed/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 			component: /Policy/policy/input_sources/fm_tuner/applicable_input_device/mask
-				in = 1
 				communication = 0
 				ambient = 0
 				bluetooth_sco_headset = 0
@@ -201,12 +235,14 @@
 				dgtl_dock_headset = 0
 				usb_accessory = 0
 				usb_device = 0
-				fm_tuner = 0
 				tv_tuner = 0
 				line = 0
 				spdif = 0
 				bluetooth_a2dp = 0
 				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 0
 
 	domain: DefaultAndMic
 		conf: A2dp
@@ -239,7 +275,7 @@
 					bluetooth_sco_headset = 1
 				component: mic/applicable_input_device/mask/
 					bluetooth_a2dp = 0
-					wired_headset = 1
+					wired_headset = 0
 					usb_device = 0
 					builtin_mic = 0
 					bluetooth_sco_headset = 1
@@ -345,7 +381,7 @@
 				back_mic = 0
 				builtin_mic = 0
 
-	domain: VoiceRecognitionAndHotword
+	domain: VoiceRecognitionAndUnprocessedAndHotword
 		conf: ScoHeadset
 			ForceUseForRecord Is ForceBtSco
 			AvailableInputDevices Includes BluetoothScoHeadset
@@ -356,6 +392,11 @@
 					wired_headset = 0
 					usb_device = 0
 					builtin_mic = 0
+				component: unprocessed/applicable_input_device/mask
+					bluetooth_sco_headset = 1
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
 				component: hotword/applicable_input_device/mask
 					bluetooth_sco_headset = 1
 					wired_headset = 0
@@ -371,6 +412,11 @@
 					wired_headset = 1
 					usb_device = 0
 					builtin_mic = 0
+				component: unprocessed/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 1
+					usb_device = 0
+					builtin_mic = 0
 				component: hotword/applicable_input_device/mask
 					bluetooth_sco_headset = 0
 					wired_headset = 1
@@ -386,6 +432,11 @@
 					wired_headset = 0
 					usb_device = 1
 					builtin_mic = 0
+				component: unprocessed/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 0
+					usb_device = 1
+					builtin_mic = 0
 				component: hotword/applicable_input_device/mask
 					bluetooth_sco_headset = 0
 					wired_headset = 0
@@ -401,6 +452,11 @@
 					wired_headset = 0
 					usb_device = 0
 					builtin_mic = 1
+				component: unprocessed/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 1
 				component: hotword/applicable_input_device/mask
 					bluetooth_sco_headset = 0
 					wired_headset = 0
@@ -414,6 +470,11 @@
 					wired_headset = 0
 					usb_device = 0
 					builtin_mic = 0
+				component: unprocessed/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
 				component: hotword/applicable_input_device/mask
 					bluetooth_sco_headset = 0
 					wired_headset = 0
@@ -484,11 +545,14 @@
 				back_mic = 1
 
 		conf: Default
+			#
+			# Fall back on the default input device, which can be the builtin mic for example.
+			#
 			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
 				bluetooth_sco_headset = 0
 				wired_headset = 0
 				usb_device = 0
-				builtin_mic = 0
+				builtin_mic = 1
 				back_mic = 0
 
 	domain: RemoteSubmix
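Two things happen throughout the Calibration hunks above: the explicit in = 1 field disappears from every input-source mask, and the new ip/bus/stub device bits are zero-initialized. The direction flag is redundant because input device types already carry a direction bit in their numeric encoding; a sketch of that convention follows (constant values are assumed to match the system/audio.h of this era and should be verified against the actual header):

    // Sketch of the input-device direction-bit convention (values assumed).
    #include <cstdint>

    typedef uint32_t audio_devices_t;

    static const audio_devices_t AUDIO_DEVICE_BIT_IN         = 0x80000000u;
    static const audio_devices_t AUDIO_DEVICE_IN_BUILTIN_MIC = AUDIO_DEVICE_BIT_IN | 0x4u;

    // An input device is recognized by its sign bit, so the settings no
    // longer need a separate per-source "in" flag.
    static inline bool isInputDevice(audio_devices_t device)
    {
        return (device & AUDIO_DEVICE_BIT_IN) != 0;
    }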
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
similarity index 67%
copy from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw
copy to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
index 85273b2..ecd56b0 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
@@ -1,158 +1,197 @@
 supDomain: DeviceForStrategy
 
-	supDomain: Dtmf
-
+	supDomain: Accessibility
+		#
+		# STRATEGY_ACCESSIBILITY follows STRATEGY_PHONE if in call (widely speaking)
+		# STRATEGY_ACCESSIBILITY follows STRATEGY_MEDIA otherwise
+		#
+		# Other cases are handled programmatically, as they involve stream activity.
+		#
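The rule stated in this comment block is what the configurations below encode, condition by condition; roughly, with hypothetical helper names:

    // Rough equivalent of the routing rule above (all names hypothetical;
    // the real engine derives this from the rule-based configurations).
    #include <cstdint>
    typedef uint32_t audio_devices_t;

    enum Strategy { STRATEGY_PHONE, STRATEGY_MEDIA };

    audio_devices_t getDeviceForStrategy(Strategy s); // assumed, provided elsewhere
    bool isInCall();                                  // assumed telephony checks
    bool isInCommunication();

    audio_devices_t deviceForAccessibility()
    {
        // accessibility follows PHONE while in call, MEDIA otherwise
        if (isInCall() || isInCommunication()) {
            return getDeviceForStrategy(STRATEGY_PHONE);
        }
        return getDeviceForStrategy(STRATEGY_MEDIA);
    }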
 		domain: UnreachableDevices
 			conf: Calibration
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					hdmi_arc = 0
+					spdif = 0
+					aux_line = 0
 					fm = 0
 					speaker_safe = 0
-					bluetooth_sco_carkit = 0
+					telephony_tx = 0
+					ip = 0
+					bus = 0
+					stub = 0
 
-		domain: Device2
+		domain: Device
 			conf: RemoteSubmix
 				#
-				# DTMF follows Media strategy if not in call
+				# Accessibility follows Media strategy if not in call
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes RemoteSubmix
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 1
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
 			conf: BluetoothA2dp
 				#
-				# DTMF follows Media strategy if not in call
+				# Accessibility falls through media strategy if not in call (widely speaking)
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dp
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 1
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
-			conf: BluetoothA2dpHeadphones
+			conf: BluetoothA2dpHeadphone
 				#
-				# DTMF follows Media strategy if not in call
+				# Accessibility falls through media strategy if not in call (widely speaking)
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 1
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
 			conf: BluetoothA2dpSpeaker
 				#
-				# DTMF follows Media strategy if not in call
+				# Accessibility falls through media strategy if not in call (widely speaking)
 				#
+				ForceUseForMedia IsNot ForceNoBtA2dp
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
-				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 1
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
 			conf: ForceSpeakerWhenNotInCall
 				#
-				# DTMF follows Media strategy if not in call
+				# Accessibility follows Media strategy if not in call
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia Is ForceSpeaker
-				ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
 				AvailableOutputDevices Includes Speaker
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 1
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
+					hdmi = 0
+
+			conf: BluetoothScoCarkit
+				#
+				# accessibility falls through Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				AvailableOutputDevices Includes BluetoothScoCarkit
+				ForceUseForCommunication Is ForceBtSco
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 1
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
 					line = 0
-					speaker = 1
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
 
 			conf: BluetoothScoHeadset
 				#
-				# DTMF falls through Phone strategy if in call
+				# accessibility falls through Phone strategy if in call
 				#
 				ANY
 					TelephonyMode Is InCall
@@ -160,28 +199,28 @@
 				AvailableOutputDevices Includes BluetoothScoHeadset
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 1
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 1
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
 			conf: BluetoothSco
 				#
-				# DTMF falls through Phone strategy if in call
+				# accessibility falls through Phone strategy if in call
 				#
 				ANY
 					TelephonyMode Is InCall
@@ -189,29 +228,29 @@
 				AvailableOutputDevices Includes BluetoothSco
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 1
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 1
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
 			conf: WiredHeadphone
 				ANY
 					#
-					# DTMF falls through Phone strategy if in call
+					# accessibility falls through Phone strategy if in call
 					#
 					ALL
 						ANY
@@ -219,63 +258,72 @@
 							TelephonyMode Is InCommunication
 						ForceUseForCommunication IsNot ForceSpeaker
 					#
-					# DTMF follows Media strategy if not in call
+					# accessibility follows Media strategy if not in call
 					#
 					ALL
 						TelephonyMode IsNot InCall
 						TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes WiredHeadphone
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
 					wired_headset = 0
 					wired_headphone = 1
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
-			conf: LineWhenFollowingMedia
-				#
-				# DTMF follows Media strategy if not in call
-				#
-				TelephonyMode IsNot InCall
-				TelephonyMode IsNot InCommunication
+			conf: Line
+				ANY
+					#
+					# accessibility falls through Phone strategy if in call
+					# but Line has a lower priority than WiredHeadset in this case.
+					#
+					ALL
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+						AvailableOutputDevices Excludes WiredHeadset
+					#
+					# accessibility follows Media strategy if not in call
+					#
 				AvailableOutputDevices Includes Line
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 1
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 1
-					speaker = 0
+					hdmi = 0
 
 			conf: WiredHeadset
 				ANY
 					#
-					# DTMF falls through Phone strategy if in call
+					# accessibility falls through Phone strategy if in call
 					#
 					ALL
 						ANY
@@ -283,36 +331,36 @@
 							TelephonyMode Is InCommunication
 						ForceUseForCommunication IsNot ForceSpeaker
 					#
-					# DTMF follows Media strategy if not in call
+					# accessibility follows Media strategy if not in call
 					#
 					ALL
 						TelephonyMode IsNot InCall
 						TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes WiredHeadset
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 1
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 1
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
 			conf: UsbDevice
 				ANY
 					#
-					# DTMF falls through Phone strategy if in call (widely speaking)
+					# accessibility falls through Phone strategy if in call (widely speaking)
 					#
 					ALL
 						ANY
@@ -320,7 +368,7 @@
 							TelephonyMode Is InCommunication
 						ForceUseForCommunication IsNot ForceSpeaker
 					#
-					# DTMF follows Media strategy if not in call
+					# accessibility follows Media strategy if not in call
 					# Media strategy inverts the priority of USB device vs accessory
 					#
 					ALL
@@ -330,151 +378,151 @@
 						ForceUseForCommunication Is ForceSpeaker
 				AvailableOutputDevices Includes UsbDevice
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 1
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
 			conf: UsbAccessory
 				#
-				# DTMF falls through Phone strategy if in call (widely speaking)
+				# accessibility falls through Phone strategy if in call (widely speaking)
 				# but USB accessory not reachable in call
 				#
-				# DTMF follows Media strategy if not in call
+				# accessibility follows Media strategy if not in call
 				# Media strategy inverts the priority of USB device vs accessory
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes UsbAccessory
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 1
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
 			conf: DgtlDockHeadset
 				#
-				# DTMF falls through Phone strategy if in call (widely speaking)
+				# accessibility falls through Phone strategy if in call (widely speaking)
 				# but DgtlDockHeadset not reachable in call
 				#
-				# DTMF follows Media strategy if not in call
+				# accessibility follows Media strategy if not in call
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes DgtlDockHeadset
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 1
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
-			conf: Hdmi
+			conf: AuxDigital
 				#
-				# DTMF falls through Phone strategy if in call (widely speaking)
+				# accessibility falls through Phone strategy if in call (widely speaking)
 				# but Hdmi not reachable in call
 				#
-				# DTMF follows Media strategy if not in call
+				# accessibility follows Media strategy if not in call
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes Hdmi
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 1
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 1
 
 			conf: AnlgDockHeadset
 				#
-				# DTMF falls through Phone strategy if in call (widely speaking)
+				# accessibility falls through Phone strategy if in call (widely speaking)
 				# but AnlgDockHeadset not reachable in call
 				#
-				# DTMF follows Media strategy if not in call
+				# accessibility follows Media strategy if not in call
 				# Media strategy inverts the priority of USB device vs accessory
 				#
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
-				ForceUseForDock Is ForceAnalogDock
 				AvailableOutputDevices Includes AnlgDockHeadset
+				ForceUseForDock Is ForceAnalogDock
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 1
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
 			conf: Earpiece
 				#
-				# DTMF falls through Phone strategy if in call
+				# accessibility falls through Phone strategy if in call
 				#
 				ANY
 					TelephonyMode Is InCall
@@ -482,156 +530,64 @@
 				AvailableOutputDevices Includes Earpiece
 				ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 1
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					telephony_tx = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
 					line = 0
-					speaker = 0
-
-			conf: LineWhenFallThroughPhone
-				#
-				# DTMF falls through Phone strategy if in call
-				#
-				ANY
-					TelephonyMode Is InCall
-					TelephonyMode Is InCommunication
-				AvailableOutputDevices Includes Line
-				ForceUseForCommunication Is ForceSpeaker
-
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
-					remote_submix = 0
-					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					hdmi = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 1
-					speaker = 0
+					hdmi = 0
 
 			conf: Speaker
-				ANY
-					#
-					# DTMF falls through Phone strategy if in call
-					#
-					ALL
-						ANY
-							TelephonyMode Is InCall
-							TelephonyMode Is InCommunication
-						ForceUseForCommunication Is ForceSpeaker
-					#
-					# DTMF follows Media strategy if not in call
-					#
-					ALL
-						TelephonyMode IsNot InCall
-						TelephonyMode IsNot InCommunication
-						ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
 				AvailableOutputDevices Includes Speaker
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 1
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 1
+					hdmi = 0
 
 			conf: Default
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
 					bluetooth_a2dp = 0
 					bluetooth_a2dp_headphones = 0
 					bluetooth_a2dp_speaker = 0
-					hdmi = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_sco = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
 					usb_accessory = 0
 					usb_device = 0
-					telephony_tx = 0
-					line = 0
-					speaker = 0
+					hdmi = 0
 
-		domain: Arc
-			#
-			# DTMF strategy follows media strategy if not in call
-			# these following domains consists in device(s) that can co-exist with others
-			# e.g. ARC, SPDIF, AUX_LINE
-			#
-			conf: Selected
-				TelephonyMode IsNot InCall
-				TelephonyMode IsNot InCommunication
-				AvailableOutputDevices Includes HdmiArc
-
-				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 1
-
-			conf: NotSelected
-				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 0
-
-		domain: Spdif
-			#
-			# DTMF strategy follows media strategy if not in call
-			# these following domains consists in device(s) that can co-exist with others
-			# e.g. ARC, SPDIF, AUX_LINE
-			#
-			conf: Selected
-				TelephonyMode IsNot InCall
-				TelephonyMode IsNot InCommunication
-				AvailableOutputDevices Includes Spdif
-
-				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 1
-
-			conf: NotSelected
-				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 0
-
-		domain: AuxLine
-			#
-			# DTMF strategy follows media strategy if not in call
-			# these following domains consists in device(s) that can co-exist with others
-			# e.g. ARC, SPDIF, AUX_LINE
-			#
-			conf: Selected
-				TelephonyMode IsNot InCall
-				TelephonyMode IsNot InCommunication
-				AvailableOutputDevices Includes AuxLine
-
-				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 1
-
-			conf: NotSelected
-				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
similarity index 91%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
index 85273b2..883c741 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
@@ -8,6 +8,9 @@
 					fm = 0
 					speaker_safe = 0
 					bluetooth_sco_carkit = 0
+					ip = 0
+					bus = 0
+					stub = 0
 
 		domain: Device2
 			conf: RemoteSubmix
@@ -245,12 +248,24 @@
 					line = 0
 					speaker = 0
 
-			conf: LineWhenFollowingMedia
-				#
-				# DTMF follows Media strategy if not in call
-				#
-				TelephonyMode IsNot InCall
-				TelephonyMode IsNot InCommunication
+			conf: Line
+				ANY
+					#
+					# DTMF falls through Phone strategy if in call
+					# but Line has a lower priority than WiredHeadset in this case.
+					#
+					ALL
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+						AvailableOutputDevices Excludes WiredHeadset
+					#
+					# DTMF follows Media strategy if not in call
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes Line
 
 				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
@@ -501,35 +516,6 @@
 					line = 0
 					speaker = 0
 
-			conf: LineWhenFallThroughPhone
-				#
-				# DTMF falls through Phone strategy if in call
-				#
-				ANY
-					TelephonyMode Is InCall
-					TelephonyMode Is InCommunication
-				AvailableOutputDevices Includes Line
-				ForceUseForCommunication Is ForceSpeaker
-
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
-					remote_submix = 0
-					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					hdmi = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					telephony_tx = 0
-					line = 1
-					speaker = 0
-
 			conf: Speaker
 				ANY
 					#
@@ -599,10 +585,10 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes HdmiArc
 
-				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 1
+				/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc = 1
 
 			conf: NotSelected
-				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 0
+				/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc = 0
 
 		domain: Spdif
 			#
@@ -615,10 +601,10 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes Spdif
 
-				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 1
+				/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif = 1
 
 			conf: NotSelected
-				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 0
+				/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif = 0
 
 		domain: AuxLine
 			#
@@ -631,7 +617,7 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes AuxLine
 
-				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 1
+				/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line = 1
 
 			conf: NotSelected
-				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 0
+				/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line = 0
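
The reshaped "conf: Line" rule above reads more easily with the pfw rule
grammar in mind: the top-level lines of a configuration are implicitly
ALL-joined, and ANY/ALL blocks nest. A minimal C++ sketch of the same
predicate (the boolean parameters are stand-ins for criterion states, not
a real engine API):

    bool isLineSelected(bool inCall, bool inCommunication, bool forceSpeaker,
                        bool hasWiredHeadset, bool hasLine) {
        return (   // DTMF falls through Phone strategy if in call, but Line
                   // has a lower priority than WiredHeadset in that case.
                   ((inCall || inCommunication) && !forceSpeaker && !hasWiredHeadset)
                   // DTMF follows Media strategy if not in call.
                || (!inCall && !inCommunication))
               && hasLine;  // the AvailableOutputDevices Includes Line line
    }
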
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
similarity index 94%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
index d714743..f504631 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
@@ -6,11 +6,14 @@
 			conf: Calibration
 				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
 					# no enforced_audible on remote submix (e.g. WFD)
-					remote_submix = 0
 					hdmi_arc = 0
 					spdif = 0
 					aux_line = 0
 					speaker_safe = 0
+					ip = 0
+					bus = 0
+					fm = 0
+					stub = 0
 
 		domain: Speaker
 			conf: Selected
@@ -77,8 +80,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: BluetoothA2dp
 				AvailableOutputDevices Includes BluetoothA2dp
@@ -102,8 +103,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: BluetoothA2dpHeadphones
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
@@ -127,8 +126,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: BluetoothA2dpSpeaker
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
@@ -152,8 +149,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: WiredHeadphone
 				ForceUseForMedia IsNot ForceSpeaker
@@ -177,8 +172,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: Line
 				ForceUseForMedia IsNot ForceSpeaker
@@ -202,8 +195,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 1
-					fm = 0
-					speaker_safe = 0
 
 			conf: WiredHeadset
 				ForceUseForMedia IsNot ForceSpeaker
@@ -227,8 +218,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: UsbAccessory
 				ForceUseForMedia IsNot ForceSpeaker
@@ -252,8 +241,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: UsbDevice
 				ForceUseForMedia IsNot ForceSpeaker
@@ -277,8 +264,6 @@
 					usb_device = 1
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: DgtlDockHeadset
 				ForceUseForMedia IsNot ForceSpeaker
@@ -302,8 +287,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: Hdmi
 				ForceUseForMedia IsNot ForceSpeaker
@@ -327,8 +310,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
 
 			conf: AnlgDockHeadset
 				ForceUseForMedia IsNot ForceSpeaker
@@ -353,6 +334,25 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					fm = 0
-					speaker_safe = 0
+
+			conf: NoDevice
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_media.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
similarity index 97%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_media.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
index 38bede5..bdb6ae0 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_media.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
@@ -12,6 +12,9 @@
 					bluetooth_sco_headset = 0
 					bluetooth_sco_carkit = 0
 					telephony_tx = 0
+					ip = 0
+					bus = 0
+					stub = 0
 
 		domain: Device2
 			conf: RemoteSubmix
@@ -34,6 +37,7 @@
 
 			conf: BluetoothA2dp
 				ForceUseForMedia IsNot ForceNoBtA2dp
+				ForceUseForCommunication IsNot ForceBtSco
 				AvailableOutputDevices Includes BluetoothA2dp
 
 				component: /Policy/policy/strategies/media/selected_output_devices/mask
@@ -53,6 +57,7 @@
 
 			conf: BluetoothA2dpHeadphone
 				ForceUseForMedia IsNot ForceNoBtA2dp
+				ForceUseForCommunication IsNot ForceBtSco
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
 
 				component: /Policy/policy/strategies/media/selected_output_devices/mask
@@ -72,6 +77,7 @@
 
 			conf: BluetoothA2dpSpeaker
 				ForceUseForMedia IsNot ForceNoBtA2dp
+				ForceUseForCommunication IsNot ForceBtSco
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
 
 				component: /Policy/policy/strategies/media/selected_output_devices/mask
@@ -263,6 +269,7 @@
 				# If hdmi system audio mode is on, remove speaker out of output list.
 				#
 				ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
+				ForceUseForCommunication IsNot ForceBtSco
 
 				component: /Policy/policy/strategies/media/selected_output_devices/mask
 					speaker = 1
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_phone.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_phone.pfw
similarity index 96%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_phone.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_phone.pfw
index 7b01491..d371ad9 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_phone.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_phone.pfw
@@ -12,6 +12,9 @@
 					spdif = 0
 					fm = 0
 					speaker_safe = 0
+					ip = 0
+					bus = 0
+					stub = 0
 
 		domain: Device
 			conf: ScoCarkit
@@ -92,9 +95,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia IsNot ForceNoBtA2dp
-				ANY
-					ForceUseForCommunication Is ForceBtSco
-					ForceUseForCommunication Is ForceNone
+				ForceUseForCommunication Is ForceNone
 
 				component: /Policy/policy/strategies/phone/selected_output_devices/mask
 					earpiece = 0
@@ -124,9 +125,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia IsNot ForceNoBtA2dp
-				ANY
-					ForceUseForCommunication Is ForceBtSco
-					ForceUseForCommunication Is ForceNone
+				ForceUseForCommunication Is ForceNone
 
 				component: /Policy/policy/strategies/phone/selected_output_devices/mask
 					earpiece = 0
@@ -166,8 +165,8 @@
 					bluetooth_sco_headset = 0
 					bluetooth_sco_carkit = 0
 					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 1
-					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 1
 					hdmi = 0
 					angl_dock_headset = 0
 					dgtl_dock_headset = 0
@@ -231,6 +230,29 @@
 					line = 0
 					speaker = 0
 
+			conf: Line
+				AvailableOutputDevices Includes Line
+				ForceUseForCommunication IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 1
+					speaker = 0
+
 			conf: UsbDevice
 				#
 				# Fallback BT Sco devices in case of FORCE_BT_SCO
@@ -408,33 +430,6 @@
 					line = 0
 					speaker = 0
 
-			conf: Line
-				#
-				# Fallback BT Sco devices in case of FORCE_BT_SCO
-				# or FORCE_NONE
-				#
-				AvailableOutputDevices Includes Line
-				ForceUseForCommunication Is ForceSpeaker
-
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
-					earpiece = 0
-					wired_headset = 0
-					wired_headphone = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
-					bluetooth_sco_carkit = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					hdmi = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					telephony_tx = 0
-					line = 1
-					speaker = 0
-
 			conf: Speaker
 				#
 				# Fallback BT Sco devices in case of FORCE_BT_SCO
@@ -463,6 +458,9 @@
 					speaker = 1
 
 			conf: Default
+				#
+				# Fallback on the default output device, which is typically the speaker
+				#
 				component: /Policy/policy/strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
@@ -480,6 +478,6 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
-					speaker = 0
+					speaker = 1
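
Configurations inside a pfw domain are evaluated top to bottom and the first
one whose rule matches is applied, so moving "conf: Line" ahead of the
FORCE_BT_SCO fallbacks (and making "conf: Default" select the speaker)
changes the effective priority order. A rough sketch, with hypothetical
ruleMatches helpers standing in for the confs' rules:

    const char *phoneFallbackDevice() {
        if (scoRuleMatches())     return "bluetooth_sco";
        if (lineRuleMatches())    return "line";    // moved ahead of the fallbacks
        if (speakerRuleMatches()) return "speaker";
        return "speaker";                           // conf: Default now sets speaker = 1
    }
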
 
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_rerouting.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
similarity index 99%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_rerouting.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
index d390a33..04e62f7 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_rerouting.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
@@ -17,6 +17,9 @@
 					bluetooth_sco_headset = 0
 					bluetooth_sco_carkit = 0
 					telephony_tx = 0
+					ip = 0
+					bus = 0
+					stub = 0
 
 		domain: Device2
 			conf: RemoteSubmix
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification.pfw
similarity index 98%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification.pfw
index 71101f8..70740d1 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification.pfw
@@ -16,6 +16,9 @@
 					# Sonification follows phone strategy if in call but HDMI is not reachable
 					#
 					hdmi = 0
+					ip = 0
+					bus = 0
+					stub = 0
 
 		domain: Speaker
 
@@ -247,12 +250,14 @@
 				ANY
 					#
 					# Sonification follows Phone strategy if in call (widely speaking)
+					# but Line has a lower priority than WiredHeadset in this case.
 					#
 					ALL
 						ANY
 							TelephonyMode Is InCall
 							TelephonyMode Is InCommunication
-						ForceUseForCommunication Is ForceSpeaker
+						ForceUseForCommunication IsNot ForceSpeaker
+						AvailableOutputDevices Excludes WiredHeadset
 					#
 					# Sonification falls through media strategy if not in call (widely speaking)
 					#
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification_respectful.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
similarity index 93%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification_respectful.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
index f66674c..b30aa4c 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification_respectful.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
@@ -22,6 +22,9 @@
 					spdif = 0
 					fm = 0
 					telephony_tx = 0
+					ip = 0
+					bus = 0
+					stub = 0
 
 		domain: Speakers
 
@@ -267,15 +270,29 @@
 					usb_device = 0
 					hdmi = 0
 
-			conf: LineWhenFollowMediaStrategy
-				#
-				# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
-				# SonificationRespectful follows media if music stream is active
-				#
-				TelephonyMode IsNot InCall
-				TelephonyMode IsNot InCommunication
-				AvailableOutputDevices Includes WiredHeadphone
-				ForceUseForMedia IsNot ForceSpeaker
+			conf: Line
+				ANY
+					#
+					# SonificationRespectful Follows Phone strategy if in call
+					# but Line has a lower priority than WiredHeadset in this case.
+					#
+					ALL
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+						AvailableOutputDevices Excludes WiredHeadset
+					#
+					# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+					# SonificationRespectful follows media if music stream is active
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						AvailableOutputDevices Includes WiredHeadphone
+						ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes Line
 
 				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
@@ -517,29 +534,3 @@
 					usb_device = 0
 					hdmi = 0
 
-			conf: Line
-				#
-				# SonificationRespectful Follows Phone strategy if in call
-				#
-				ANY
-					TelephonyMode Is InCall
-					TelephonyMode Is InCommunication
-				ForceUseForCommunication Is ForceSpeaker
-				AvailableOutputDevices Includes Line
-
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
-					earpiece = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
-					bluetooth_sco_carkit = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					bluetooth_a2dp = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 1
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_transmitted_through_speaker.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_transmitted_through_speaker.pfw
similarity index 96%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_transmitted_through_speaker.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_transmitted_through_speaker.pfw
index e5ae9d9..9f9c211 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_transmitted_through_speaker.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_transmitted_through_speaker.pfw
@@ -26,6 +26,9 @@
 					usb_device = 0
 					telephony_tx = 0
 					line = 0
+					ip = 0
+					bus = 0
+					stub = 0
 
 		domain: Speaker
 			conf: Selected
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_stream.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw
similarity index 100%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_stream.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_usage.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
similarity index 100%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_usage.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/volumes.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/volumes.pfw
new file mode 100644
index 0000000..7db4537
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/volumes.pfw
@@ -0,0 +1,28 @@
+supDomain: VolumeProfilesForStream
+	domain: Calibration
+		conf: Calibration
+			/Policy/policy/streams/voice_call/applicable_volume_profile/volume_profile = voice_call
+			/Policy/policy/streams/system/applicable_volume_profile/volume_profile = system
+			/Policy/policy/streams/ring/applicable_volume_profile/volume_profile = ring
+			/Policy/policy/streams/music/applicable_volume_profile/volume_profile = music
+			/Policy/policy/streams/alarm/applicable_volume_profile/volume_profile = alarm
+			/Policy/policy/streams/notification/applicable_volume_profile/volume_profile = notification
+			/Policy/policy/streams/bluetooth_sco/applicable_volume_profile/volume_profile = bluetooth_sco
+			/Policy/policy/streams/enforced_audible/applicable_volume_profile/volume_profile = enforced_audible
+			/Policy/policy/streams/tts/applicable_volume_profile/volume_profile = tts
+			/Policy/policy/streams/accessibility/applicable_volume_profile/volume_profile = accessibility
+			/Policy/policy/streams/rerouting/applicable_volume_profile/volume_profile = rerouting
+			/Policy/policy/streams/patch/applicable_volume_profile/volume_profile = patch
+
+	domain: Dtmf
+		conf: InCall
+			ANY
+				TelephonyMode Is InCall
+				TelephonyMode Is InCommunication
+
+			/Policy/policy/streams/dtmf/applicable_volume_profile/volume_profile = voice_call
+
+		conf: OutOfCall
+			/Policy/policy/streams/dtmf/applicable_volume_profile/volume_profile = dtmf
+
+
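
The Dtmf domain above makes the DTMF stream borrow the voice_call volume
profile while in a call or communication, and fall back to its own dtmf
profile otherwise. Roughly, in C++ (a sketch; the real engine resolves this
through the parameter framework, not a helper like this):

    #include <system/audio.h>

    audio_stream_type_t dtmfVolumeProfile(bool inCall, bool inCommunication) {
        return (inCall || inCommunication) ? AUDIO_STREAM_VOICE_CALL
                                           : AUDIO_STREAM_DTMF;
    }
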
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/device_for_input_source.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/device_for_input_source.pfw
new file mode 100644
index 0000000..611d8f5
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/device_for_input_source.pfw
@@ -0,0 +1,285 @@
+supDomain: DeviceForInputSource
+	domain: Calibration
+		conf: Calibration
+			#
+			# Note that ALL input devices must have the sign bit set to 1.
+			# As the device field is a mask, the "in" bit is used as a direction indicator.
+			#
+			component: /Policy/policy/input_sources/default/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/mic/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/voice_downlink/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/voice_call/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/voice_uplink/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/voice_recognition/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/remote_submix/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/hotword/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/unprocessed/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+			component: /Policy/policy/input_sources/fm_tuner/applicable_input_device/mask
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
+
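
The "sign bit" note at the top of this file refers to the direction marker
that audio.h keeps in bit 31 of input device masks; the per-bit parameters
here only cover the low-order device bits. Assuming the system/audio.h of
this tree:

    #include <system/audio.h>
    static_assert(AUDIO_DEVICE_BIT_IN == 0x80000000u,
                  "input direction marker is bit 31");
    static_assert(AUDIO_DEVICE_IN_BUILTIN_MIC == (AUDIO_DEVICE_BIT_IN | 0x4u),
                  "input devices combine the direction bit with a device bit");
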
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/device_for_strategies.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/device_for_strategies.pfw
new file mode 100644
index 0000000..917d4a7
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/device_for_strategies.pfw
@@ -0,0 +1,255 @@
+domain: DeviceForStrategy
+	conf: Calibration
+		component: /Policy/policy/strategies
+			component: media/selected_output_devices/mask
+				earpiece = 0
+				speaker = 0
+				wired_headset = 0
+				wired_headphone = 0
+				bluetooth_sco = 0
+				bluetooth_sco_headset = 0
+				bluetooth_sco_carkit = 0
+				bluetooth_a2dp = 0
+				bluetooth_a2dp_headphones = 0
+				bluetooth_a2dp_speaker = 0
+				hdmi = 0
+				angl_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				remote_submix = 0
+				telephony_tx = 0
+				line = 0
+				hdmi_arc = 0
+				spdif = 0
+				fm = 0
+				aux_line = 0
+				speaker_safe = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
+			component: phone/selected_output_devices/mask
+				earpiece = 0
+				speaker = 0
+				wired_headset = 0
+				wired_headphone = 0
+				bluetooth_sco = 0
+				bluetooth_sco_headset = 0
+				bluetooth_sco_carkit = 0
+				bluetooth_a2dp = 0
+				bluetooth_a2dp_headphones = 0
+				bluetooth_a2dp_speaker = 0
+				hdmi = 0
+				angl_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				remote_submix = 0
+				telephony_tx = 0
+				line = 0
+				hdmi_arc = 0
+				spdif = 0
+				fm = 0
+				aux_line = 0
+				speaker_safe = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
+			component: sonification/selected_output_devices/mask
+				earpiece = 0
+				speaker = 0
+				wired_headset = 0
+				wired_headphone = 0
+				bluetooth_sco = 0
+				bluetooth_sco_headset = 0
+				bluetooth_sco_carkit = 0
+				bluetooth_a2dp = 0
+				bluetooth_a2dp_headphones = 0
+				bluetooth_a2dp_speaker = 0
+				hdmi = 0
+				angl_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				remote_submix = 0
+				telephony_tx = 0
+				line = 0
+				hdmi_arc = 0
+				spdif = 0
+				fm = 0
+				aux_line = 0
+				speaker_safe = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
+			component: sonification_respectful/selected_output_devices/mask
+				earpiece = 0
+				speaker = 0
+				wired_headset = 0
+				wired_headphone = 0
+				bluetooth_sco = 0
+				bluetooth_sco_headset = 0
+				bluetooth_sco_carkit = 0
+				bluetooth_a2dp = 0
+				bluetooth_a2dp_headphones = 0
+				bluetooth_a2dp_speaker = 0
+				hdmi = 0
+				angl_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				remote_submix = 0
+				telephony_tx = 0
+				line = 0
+				hdmi_arc = 0
+				spdif = 0
+				fm = 0
+				aux_line = 0
+				speaker_safe = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
+			component: dtmf/selected_output_devices/mask
+				earpiece = 0
+				speaker = 0
+				wired_headset = 0
+				wired_headphone = 0
+				bluetooth_sco = 0
+				bluetooth_sco_headset = 0
+				bluetooth_sco_carkit = 0
+				bluetooth_a2dp = 0
+				bluetooth_a2dp_headphones = 0
+				bluetooth_a2dp_speaker = 0
+				hdmi = 0
+				angl_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				remote_submix = 0
+				telephony_tx = 0
+				line = 0
+				hdmi_arc = 0
+				spdif = 0
+				fm = 0
+				aux_line = 0
+				speaker_safe = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
+			component: enforced_audible/selected_output_devices/mask
+				earpiece = 0
+				speaker = 0
+				wired_headset = 0
+				wired_headphone = 0
+				bluetooth_sco = 0
+				bluetooth_sco_headset = 0
+				bluetooth_sco_carkit = 0
+				bluetooth_a2dp = 0
+				bluetooth_a2dp_headphones = 0
+				bluetooth_a2dp_speaker = 0
+				hdmi = 0
+				angl_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				remote_submix = 0
+				telephony_tx = 0
+				line = 0
+				hdmi_arc = 0
+				spdif = 0
+				fm = 0
+				aux_line = 0
+				speaker_safe = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
+			component: transmitted_through_speaker/selected_output_devices/mask
+				earpiece = 0
+				speaker = 0
+				wired_headset = 0
+				wired_headphone = 0
+				bluetooth_sco = 0
+				bluetooth_sco_headset = 0
+				bluetooth_sco_carkit = 0
+				bluetooth_a2dp = 0
+				bluetooth_a2dp_headphones = 0
+				bluetooth_a2dp_speaker = 0
+				hdmi = 0
+				angl_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				remote_submix = 0
+				telephony_tx = 0
+				line = 0
+				hdmi_arc = 0
+				spdif = 0
+				fm = 0
+				aux_line = 0
+				speaker_safe = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
+			component: accessibility/selected_output_devices/mask
+				earpiece = 0
+				speaker = 0
+				wired_headset = 0
+				wired_headphone = 0
+				bluetooth_sco = 0
+				bluetooth_sco_headset = 0
+				bluetooth_sco_carkit = 0
+				bluetooth_a2dp = 0
+				bluetooth_a2dp_headphones = 0
+				bluetooth_a2dp_speaker = 0
+				hdmi = 0
+				angl_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				remote_submix = 0
+				telephony_tx = 0
+				line = 0
+				hdmi_arc = 0
+				spdif = 0
+				fm = 0
+				aux_line = 0
+				speaker_safe = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
+			component: rerouting/selected_output_devices/mask
+				earpiece = 0
+				speaker = 0
+				wired_headset = 0
+				wired_headphone = 0
+				bluetooth_sco = 0
+				bluetooth_sco_headset = 0
+				bluetooth_sco_carkit = 0
+				bluetooth_a2dp = 0
+				bluetooth_a2dp_headphones = 0
+				bluetooth_a2dp_speaker = 0
+				hdmi = 0
+				angl_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				remote_submix = 0
+				telephony_tx = 0
+				line = 0
+				hdmi_arc = 0
+				spdif = 0
+				fm = 0
+				aux_line = 0
+				speaker_safe = 0
+				ip = 0
+				bus = 0
+				stub = 1
+
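
Every strategy mask in this no-output example selects only the stub device,
so each strategy resolves to AUDIO_DEVICE_OUT_STUB (bit 30, matching the
BitParameter position declared in PolicySubsystem-CommonTypes.xml below).
Assuming the system/audio.h of this tree:

    #include <system/audio.h>
    static_assert(AUDIO_DEVICE_OUT_STUB == (1u << 30),
                  "stub output device is bit 30 of audio_devices_t");
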
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicyClass.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicyClass.xml
similarity index 100%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicyClass.xml
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicyClass.xml
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-CommonTypes.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
similarity index 80%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-CommonTypes.xml
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
index 821d6ad..461e44a 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-CommonTypes.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
@@ -6,8 +6,6 @@
     profile. It must match with the output device enum parameter.
     -->
      <!--#################### GLOBAL COMPONENTS BEGIN ####################-->
-     <!-- Common Types defintion -->
-     <xi:include href="PolicySubsystem-Volume.xml"/>
 
      <!--#################### GLOBAL COMPONENTS END ####################-->
 
@@ -30,13 +28,16 @@
             <BitParameter Name="usb_accessory" Size="1" Pos="13"/>
             <BitParameter Name="usb_device" Size="1" Pos="14"/>
             <BitParameter Name="remote_submix" Size="1" Pos="15"/>
-            <BitParameter Name="telephony_tx" Size="1" Pos="26"/>
+            <BitParameter Name="telephony_tx" Size="1" Pos="16"/>
             <BitParameter Name="line" Size="1" Pos="17"/>
             <BitParameter Name="hdmi_arc" Size="1" Pos="18"/>
             <BitParameter Name="spdif" Size="1" Pos="19"/>
             <BitParameter Name="fm" Size="1" Pos="20"/>
             <BitParameter Name="aux_line" Size="1" Pos="21"/>
             <BitParameter Name="speaker_safe" Size="1" Pos="22"/>
+            <BitParameter Name="ip" Size="1" Pos="23"/>
+            <BitParameter Name="bus" Size="1" Pos="24"/>
+            <BitParameter Name="stub" Size="1" Pos="30"/>
         </BitParameterBlock>
     </ComponentType>
 
@@ -64,7 +65,9 @@
             <BitParameter Name="spdif" Size="1" Pos="16"/>
             <BitParameter Name="bluetooth_a2dp" Size="1" Pos="17"/>
             <BitParameter Name="loopback" Size="1" Pos="18"/>
-            <BitParameter Name="in" Size="1" Pos="31"/>
+            <BitParameter Name="ip" Size="1" Pos="19"/>
+            <BitParameter Name="bus" Size="1" Pos="20"/>
+            <BitParameter Name="stub" Size="1" Pos="30"/>
         </BitParameterBlock>
     </ComponentType>
 
@@ -85,6 +88,10 @@
             <BitParameter Name="compress_offload" Size="1" Pos="4"/>
             <BitParameter Name="non_blocking" Size="1" Pos="5"/>
             <BitParameter Name="hw_av_sync" Size="1" Pos="6"/>
+            <BitParameter Name="tts" Size="1" Pos="7"/>
+            <BitParameter Name="raw" Size="1" Pos="8"/>
+            <BitParameter Name="sync" Size="1" Pos="9"/>
+            <BitParameter Name="iec958_nonaudio" Size="1" Pos="10"/>
         </BitParameterBlock>
     </ComponentType>
 
@@ -96,6 +103,8 @@
         <BitParameterBlock Name="mask" Size="32">
             <BitParameter Name="fast" Size="1" Pos="0"/>
             <BitParameter Name="hw_hotword" Size="1" Pos="2"/>
+            <BitParameter Name="raw" Size="1" Pos="3"/>
+            <BitParameter Name="sync" Size="1" Pos="4"/>
         </BitParameterBlock>
     </ComponentType>
 
@@ -111,8 +120,9 @@
             <BitParameter Name="voice_recognition" Size="1" Pos="6"/>
             <BitParameter Name="voice_communication" Size="1" Pos="7"/>
             <BitParameter Name="remote_submix" Size="1" Pos="8"/>
-            <BitParameter Name="fm_tuner" Size="1" Pos="9"/>
-            <BitParameter Name="hotword" Size="1" Pos="10"/>
+            <BitParameter Name="unprocessed" Size="1" Pos="9"/>
+            <BitParameter Name="fm_tuner" Size="1" Pos="10"/>
+            <BitParameter Name="hotword" Size="1" Pos="11"/>
         </BitParameterBlock>
     </ComponentType>
 
@@ -142,10 +152,28 @@
 
     <!--#################### STREAM COMMON TYPES BEGIN ####################-->
 
+    <ComponentType Name="VolumeProfileType">
+        <EnumParameter Name="volume_profile" Size="32">
+            <ValuePair Literal="voice_call" Numerical="0"/>
+            <ValuePair Literal="system" Numerical="1"/>
+            <ValuePair Literal="ring" Numerical="2"/>
+            <ValuePair Literal="music" Numerical="3"/>
+            <ValuePair Literal="alarm" Numerical="4"/>
+            <ValuePair Literal="notification" Numerical="5"/>
+            <ValuePair Literal="bluetooth_sco" Numerical="6"/>
+            <ValuePair Literal="enforced_audible" Numerical="7"/>
+            <ValuePair Literal="dtmf" Numerical="8"/>
+            <ValuePair Literal="tts" Numerical="9"/>
+            <ValuePair Literal="accessibility" Numerical="10"/>
+            <ValuePair Literal="rerouting" Numerical="11"/>
+            <ValuePair Literal="patch" Numerical="12"/>
+        </EnumParameter>
+    </ComponentType>
+
     <ComponentType Name="Stream">
-        <Component Name="applicable_strategy" Type="Strategy" Mapping="Stream:'%1'"/>
-        <Component Name="volume_profiles" Type="VolumeCurvesCategories"
-                   Description="A volume profile is refered by the stream type."/>
+        <Component Name="applicable_strategy" Type="Strategy"/>
+        <Component Name="applicable_volume_profile" Type="VolumeProfileType"
+                   Description="Volume profile followed by a given stream type."/>
     </ComponentType>
 
     <!--#################### STREAM COMMON TYPES END ####################-->
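
The BitParameter positions in this file are meant to mirror the bit indices
of audio_devices_t, which is why telephony_tx moves from Pos 26 to Pos 16
and why ip, bus, and stub land on 23, 24, and 30. Assuming the
system/audio.h of this tree:

    #include <system/audio.h>
    static_assert(AUDIO_DEVICE_OUT_TELEPHONY_TX == (1u << 16), "telephony_tx");
    static_assert(AUDIO_DEVICE_OUT_IP           == (1u << 23), "ip");
    static_assert(AUDIO_DEVICE_OUT_BUS          == (1u << 24), "bus");
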
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
similarity index 87%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
index b21f6ae..71b2b62 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
@@ -2,7 +2,7 @@
 <Subsystem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
            xmlns:xi="http://www.w3.org/2001/XInclude"
            xsi:noNamespaceSchemaLocation="Schemas/Subsystem.xsd"
-           Name="policy" Type="Policy" Endianness="Little">
+           Name="policy" Type="Policy">
 
     <ComponentLibrary>
         <!--#################### GLOBAL COMPONENTS BEGIN ####################-->
@@ -38,29 +38,29 @@
 
         <ComponentType Name="Streams" Description="associated to audio_stream_type_t definition,
                              identifier mapping must match the value of the enum">
-            <Component Name="voice_call" Type="Stream" Mapping="Amend1:VoiceCall,Identifier:0"/>
-            <Component Name="system" Type="Stream" Mapping="Amend1:System,Identifier:1"/>
-            <Component Name="ring" Type="Stream" Mapping="Amend1:Ring,Identifier:2"/>
-            <Component Name="music" Type="Stream" Mapping="Amend1:Music,Identifier:3"/>
-            <Component Name="alarm" Type="Stream" Mapping="Amend1:Alarm,Identifier:4"/>
+            <Component Name="voice_call" Type="Stream" Mapping="Stream:VoiceCall,Identifier:0"/>
+            <Component Name="system" Type="Stream" Mapping="Stream:System,Identifier:1"/>
+            <Component Name="ring" Type="Stream" Mapping="Stream:Ring,Identifier:2"/>
+            <Component Name="music" Type="Stream" Mapping="Stream:Music,Identifier:3"/>
+            <Component Name="alarm" Type="Stream" Mapping="Stream:Alarm,Identifier:4"/>
             <Component Name="notification" Type="Stream"
-                                           Mapping="Amend1:Notification,Identifier:5"/>
+                                           Mapping="Stream:Notification,Identifier:5"/>
             <Component Name="bluetooth_sco" Type="Stream"
-                                            Mapping="Amend1:BluetoothSco,Identifier:6"/>
+                                            Mapping="Stream:BluetoothSco,Identifier:6"/>
             <Component Name="enforced_audible" Type="Stream"
-                                               Mapping="Amend1:EnforceAudible,Identifier:7"
+                                               Mapping="Stream:EnforceAudible,Identifier:7"
                        Description="Sounds that cannot be muted by user and must
                                     be routed to speaker"/>
-            <Component Name="dtmf" Type="Stream" Mapping="Amend1:Dtmf,Identifier:8"/>
-            <Component Name="tts" Type="Stream" Mapping="Amend1:Tts,Identifier:9"
+            <Component Name="dtmf" Type="Stream" Mapping="Stream:Dtmf,Identifier:8"/>
+            <Component Name="tts" Type="Stream" Mapping="Stream:Tts,Identifier:9"
                              Description="Transmitted Through Speaker.
                                           Plays over speaker only, silent on other devices"/>
             <Component Name="accessibility" Type="Stream"
-                                            Mapping="Amend1:Accessibility,Identifier:10"
+                                            Mapping="Stream:Accessibility,Identifier:10"
                              Description="For accessibility talk back prompts"/>
-            <Component Name="rerouting" Type="Stream" Mapping="Amend1:Rerouting,Identifier:11"
+            <Component Name="rerouting" Type="Stream" Mapping="Stream:Rerouting,Identifier:11"
                              Description="For dynamic policy output mixes"/>
-            <Component Name="patch" Type="Stream" Mapping="Amend1:Patch,Identifier:12"
+            <Component Name="patch" Type="Stream" Mapping="Stream:Patch,Identifier:12"
                              Description="For internal audio flinger tracks. Fixed volume"/>
         </ComponentType>
 
@@ -120,6 +120,8 @@
                                                   Mapping="Amend1:VoiceCommunication,Identifier:7"/>
             <Component Name="remote_submix" Type="InputSource"
                                             Mapping="Amend1:RemoteSubmix,Identifier:8"/>
+            <Component Name="unprocessed" Type="InputSource"
+                                            Mapping="Amend1:Unprocessed,Identifier:9"/>
             <Component Name="fm_tuner" Type="InputSource" Mapping="Amend1:FmTuner,Identifier:1998"/>
             <Component Name="hotword" Type="InputSource" Mapping="Amend1:Hotword,Identifier:1999"/>
         </ComponentType>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/policy_criteria.txt b/services/audiopolicy/engineconfigurable/parameter-framework/examples/policy_criteria.txt
similarity index 89%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/policy_criteria.txt
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/policy_criteria.txt
index ef06498..480cbe1 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/policy_criteria.txt
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/policy_criteria.txt
@@ -1,9 +1,10 @@
 ExclusiveCriterion TelephonyMode                :   Normal          RingTone                InCall              InCommunication
-InclusiveCriterion AvailableInputDevices        :   Communication Ambient BuiltinMic BluetoothScoHeadset WiredHeadset Hdmi TelephonyRx BackMic RemoteSubmix AnlgDockHeadset DgtlDockHeadset UsbAccessory UsbDevice FmTuner TvTuner Line Spdif BluetoothA2dp Loopback
-InclusiveCriterion AvailableOutputDevices       :   Earpiece Speaker WiredSpeaker WiredHeadset WiredHeadphone BluetoothSco BluetoothScoHeadset BluetoothScoCarkit BluetoothA2dp BluetoothA2dpHeadphones BluetoothA2dpSpeaker Hdmi AnlgDockHeadset DgtlDockHeadset UsbAccessory UsbDevice RemoteSubmix TelephonyTx Line HdmiArc Spdif Fm AuxLine SpeakerSafe
+InclusiveCriterion AvailableInputDevices        :   Communication Ambient BuiltinMic BluetoothScoHeadset WiredHeadset Hdmi TelephonyRx BackMic RemoteSubmix AnlgDockHeadset DgtlDockHeadset UsbAccessory UsbDevice FmTuner TvTuner Line Spdif BluetoothA2dp Loopback Ip Bus Stub
+InclusiveCriterion AvailableOutputDevices       :   Earpiece Speaker WiredSpeaker WiredHeadset WiredHeadphone BluetoothSco BluetoothScoHeadset BluetoothScoCarkit BluetoothA2dp BluetoothA2dpHeadphones BluetoothA2dpSpeaker Hdmi AnlgDockHeadset DgtlDockHeadset UsbAccessory UsbDevice RemoteSubmix TelephonyTx Line HdmiArc Spdif Fm AuxLine SpeakerSafe Ip Bus Stub
 ExclusiveCriterion ForceUseForCommunication     :   ForceNone       ForceSpeaker            ForceBtSco
 ExclusiveCriterion ForceUseForMedia             :   ForceNone       ForceSpeaker			ForceHeadphones         ForceBtA2dp         ForceWiredAccessory ForceAnalogDock ForceDigitalDock    ForceNoBtA2dp       ForceSystemEnforced
 ExclusiveCriterion ForceUseForRecord            :   ForceNone       ForceBtSco              ForceWiredAccessory
 ExclusiveCriterion ForceUseForDock              :   ForceNone       ForceWiredAccessory     ForceBtCarDock      ForceBtDeskDock     ForceAnalogDock ForceDigitalDock
 ExclusiveCriterion ForceUseForSystem            :   ForceNone       ForceSystemEnforced
 ExclusiveCriterion ForceUseForHdmiSystemAudio   :   ForceNone       ForceHdmiSystemEnforced
+ExclusiveCriterion ForceUseForEncodedSurround   :   ForceNone       ForceEncodedSurroundNever   ForceEncodedSurroundAlways
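
The two criterion flavours differ in how settings match them: an
InclusiveCriterion is a bit mask whose states can be combined (matched with
Includes/Excludes), while an ExclusiveCriterion holds exactly one state at a
time (matched with Is/IsNot). A C++ analogy, with hypothetical device bits
for illustration only:

    #include <cstdint>
    // Exclusive: exactly one state holds at a time.
    enum class TelephonyMode { Normal, RingTone, InCall, InCommunication };
    // Inclusive: several states may hold at once, as in a bit mask.
    constexpr uint32_t kSpeaker = 1u << 0, kWiredHeadset = 1u << 1;
    uint32_t availableOutputDevices = kSpeaker | kWiredHeadset;
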
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
index a523656..0e44f2c 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
@@ -1,5 +1,7 @@
 LOCAL_PATH := $(call my-dir)
 
+ifneq ($(USE_CUSTOM_PARAMETER_FRAMEWORK), true)
+
 include $(CLEAR_VARS)
 
 LOCAL_MODULE_TAGS := optional
@@ -8,7 +10,6 @@
     PolicySubsystem.cpp \
     Strategy.cpp \
     InputSource.cpp \
-    VolumeProfile.cpp \
     Stream.cpp \
     Usage.cpp
 
@@ -16,9 +17,10 @@
     -Wall \
     -Werror \
     -Wextra \
+    -fvisibility-inlines-hidden \
+    -fvisibility=hidden
 
 LOCAL_C_INCLUDES := \
-    $(TOPDIR)external/parameter-framework/parameter \
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
     $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/include \
     $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/interface \
@@ -26,11 +28,15 @@
 LOCAL_SHARED_LIBRARIES := \
     libaudiopolicyengineconfigurable  \
     libparameter \
-    libxmlserializer \
     liblog \
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+LOCAL_STATIC_LIBRARIES := libpfw_utility
+
 LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE := libpolicy-subsystem
 
 include $(BUILD_SHARED_LIBRARY)
 
+endif # ifneq ($(USE_CUSTOM_PARAMETER_FRAMEWORK), true)
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
index 497d555..eac4efe 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
@@ -21,33 +21,26 @@
 using std::string;
 
 InputSource::InputSource(const string &mappingValue,
-                   CInstanceConfigurableElement *instanceConfigurableElement,
-                   const CMappingContext &context)
+                         CInstanceConfigurableElement *instanceConfigurableElement,
+                         const CMappingContext &context, core::log::Logger &logger)
     : CFormattedSubsystemObject(instanceConfigurableElement,
+                                logger,
                                 mappingValue,
                                 MappingKeyAmend1,
                                 (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
                                 context),
       mPolicySubsystem(static_cast<const PolicySubsystem *>(
                            instanceConfigurableElement->getBelongingSubsystem())),
-      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface()),
-      mApplicableInputDevice(mDefaultApplicableInputDevice)
+      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
 {
     mId = static_cast<audio_source_t>(context.getItemAsInteger(MappingKeyIdentifier));
     // Declares the strategy to audio policy engine
     mPolicyPluginInterface->addInputSource(getFormattedMappingValue(), mId);
 }
 
-bool InputSource::receiveFromHW(string & /*error*/)
-{
-    blackboardWrite(&mApplicableInputDevice, sizeof(mApplicableInputDevice));
-    return true;
-}
-
 bool InputSource::sendToHW(string & /*error*/)
 {
     uint32_t applicableInputDevice;
     blackboardRead(&applicableInputDevice, sizeof(applicableInputDevice));
-    mApplicableInputDevice = applicableInputDevice;
-    return mPolicyPluginInterface->setDeviceForInputSource(mId, mApplicableInputDevice);
+    return mPolicyPluginInterface->setDeviceForInputSource(mId, applicableInputDevice);
 }
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
index 67c5b50..58f3c06d 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
@@ -28,11 +28,11 @@
 {
 public:
     InputSource(const std::string &mappingValue,
-             CInstanceConfigurableElement *instanceConfigurableElement,
-             const CMappingContext &context);
+                CInstanceConfigurableElement *instanceConfigurableElement,
+                const CMappingContext &context,
+                core::log::Logger& logger);
 
 protected:
-    virtual bool receiveFromHW(std::string &error);
     virtual bool sendToHW(std::string &error);
 
 private:
@@ -44,6 +44,4 @@
     android::AudioPolicyPluginInterface *mPolicyPluginInterface;
 
     audio_source_t mId; /**< input source identifier to link with audio.h. */
-    uint32_t mApplicableInputDevice; /**< applicable input device for this strategy. */
-    static const uint32_t mDefaultApplicableInputDevice = 0; /**< default input device. */
 };
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
index bf3906d..98d10a9 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
@@ -20,7 +20,6 @@
 #include "Strategy.h"
 #include "Stream.h"
 #include "InputSource.h"
-#include "VolumeProfile.h"
 #include "Usage.h"
 #include <AudioPolicyPluginInterface.h>
 #include <AudioPolicyEngineInstance.h>
@@ -40,10 +39,9 @@
 const char *const PolicySubsystem::mStrategyComponentName = "Strategy";
 const char *const PolicySubsystem::mInputSourceComponentName = "InputSource";
 const char *const PolicySubsystem::mUsageComponentName = "Usage";
-const char *const PolicySubsystem::mVolumeProfileComponentName = "VolumeProfile";
 
-PolicySubsystem::PolicySubsystem(const std::string &name)
-    : CSubsystem(name),
+PolicySubsystem::PolicySubsystem(const std::string &name, core::log::Logger &logger)
+    : CSubsystem(name, logger),
       mPluginInterface(NULL)
 {
     // Try to connect a Plugin Interface from Audio Policy Engine
@@ -67,7 +65,7 @@
     addSubsystemObjectFactory(
         new TSubsystemObjectFactory<Stream>(
             mStreamComponentName,
-            (1 << MappingKeyAmend1) | (1 << MappingKeyIdentifier))
+            (1 << MappingKeyIdentifier))
         );
     addSubsystemObjectFactory(
         new TSubsystemObjectFactory<Strategy>(
@@ -84,11 +82,6 @@
             mInputSourceComponentName,
             (1 << MappingKeyAmend1) | (1 << MappingKeyIdentifier))
         );
-    addSubsystemObjectFactory(
-        new TSubsystemObjectFactory<VolumeProfile>(
-            mVolumeProfileComponentName,
-            (1 << MappingKeyAmend1) | (1 << MappingKeyIdentifier) | (1 << MappingKeyIdentifier))
-        );
 }
 
 // Retrieve Route interface
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
index 3c26fe1..822eeb9 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
@@ -28,7 +28,7 @@
 class PolicySubsystem : public CSubsystem
 {
 public:
-    PolicySubsystem(const std::string &strName);
+    PolicySubsystem(const std::string &strName, core::log::Logger& logger);
 
     /**
      * Retrieve Route Manager interface.
@@ -56,5 +56,4 @@
     static const char *const mStrategyComponentName;
     static const char *const mInputSourceComponentName;
     static const char *const mUsageComponentName;
-    static const char *const mVolumeProfileComponentName;
 };
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
index b14d446..348d5e7 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
@@ -14,16 +14,16 @@
  * limitations under the License.
  */
 
-#include "SubsystemLibrary.h"
-#include "NamedElementBuilderTemplate.h"
+#include <Plugin.h>
+#include "LoggingElementBuilderTemplate.h"
 #include "PolicySubsystem.h"
 
 static const char *const POLICY_SUBSYSTEM_NAME = "Policy";
 extern "C"
 {
-void getPOLICYSubsystemBuilder(CSubsystemLibrary *subsystemLibrary)
+void PARAMETER_FRAMEWORK_PLUGIN_ENTRYPOINT_V1(CSubsystemLibrary *subsystemLibrary, core::log::Logger& logger)
 {
     subsystemLibrary->addElementBuilder(POLICY_SUBSYSTEM_NAME,
-                                        new TNamedElementBuilderTemplate<PolicySubsystem>());
+                                        new TLoggingElementBuilderTemplate<PolicySubsystem>(logger));
 }
 }
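
Note on the new entry point: PARAMETER_FRAMEWORK_PLUGIN_ENTRYPOINT_V1 is the
versioned symbol the parameter-framework host resolves when it loads the
plugin, and the logging element builder forwards the host's logger into every
subsystem it constructs, which is why PolicySubsystem now takes a
core::log::Logger&. A minimal sketch of that builder pattern follows; the
create() hook and its signature are illustrative assumptions, not the actual
parameter-framework interface.

    #include <string>

    namespace core { namespace log { class Logger; } }

    // Simplified stand-in for TLoggingElementBuilderTemplate: capture the
    // logger once at registration time, hand it to each new subsystem.
    template <class SubsystemType>
    class LoggingBuilderSketch
    {
    public:
        explicit LoggingBuilderSketch(core::log::Logger &logger) : mLogger(logger) {}

        // Hypothetical factory hook standing in for the real builder API.
        SubsystemType *create(const std::string &name) const
        {
            return new SubsystemType(name, mLogger);
        }

    private:
        core::log::Logger &mLogger;
    };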
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
index 1848813..746c3a8 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
@@ -23,16 +23,17 @@
 
 Strategy::Strategy(const string &mappingValue,
                    CInstanceConfigurableElement *instanceConfigurableElement,
-                   const CMappingContext &context)
+                   const CMappingContext &context,
+                   core::log::Logger& logger)
     : CFormattedSubsystemObject(instanceConfigurableElement,
+                                logger,
                                 mappingValue,
                                 MappingKeyAmend1,
                                 (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
                                 context),
       mPolicySubsystem(static_cast<const PolicySubsystem *>(
                            instanceConfigurableElement->getBelongingSubsystem())),
-      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface()),
-      mApplicableOutputDevice(mDefaultApplicableOutputDevice)
+      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
 {
     mId = static_cast<routing_strategy>(context.getItemAsInteger(MappingKeyIdentifier));
 
@@ -40,12 +41,6 @@
     mPolicyPluginInterface->addStrategy(getFormattedMappingValue(), mId);
 }
 
-bool Strategy::receiveFromHW(string & /*error*/)
-{
-    blackboardWrite(&mApplicableOutputDevice, sizeof(mApplicableOutputDevice));
-    return true;
-}
-
 bool Strategy::sendToHW(string & /*error*/)
 {
     uint32_t applicableOutputDevice;
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
index 9a9b3e4..c02b82c 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
@@ -29,10 +29,10 @@
 public:
     Strategy(const std::string &mappingValue,
              CInstanceConfigurableElement *instanceConfigurableElement,
-             const CMappingContext &context);
+             const CMappingContext &context,
+             core::log::Logger& logger);
 
 protected:
-    virtual bool receiveFromHW(std::string &error);
     virtual bool sendToHW(std::string &error);
 
 private:
@@ -44,6 +44,4 @@
     android::AudioPolicyPluginInterface *mPolicyPluginInterface;
 
     android::routing_strategy mId; /**< strategy identifier to link with audio.h.*/
-    uint32_t mApplicableOutputDevice; /**< applicable output device for this strategy. */
-    static const uint32_t mDefaultApplicableOutputDevice = 0; /**< default output device. */
 };
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
index 575b0bb..c642a23 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
@@ -21,18 +21,13 @@
 using std::string;
 using android::routing_strategy;
 
-Stream::Stream(const string &mappingValue,
-                   CInstanceConfigurableElement *instanceConfigurableElement,
-                   const CMappingContext &context)
-    : CFormattedSubsystemObject(instanceConfigurableElement,
-                                mappingValue,
-                                MappingKeyAmend1,
-                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
-                                context),
+Stream::Stream(const string &/*mappingValue*/,
+               CInstanceConfigurableElement *instanceConfigurableElement,
+               const CMappingContext &context, core::log::Logger &logger)
+    : CSubsystemObject(instanceConfigurableElement, logger),
       mPolicySubsystem(static_cast<const PolicySubsystem *>(
                            instanceConfigurableElement->getBelongingSubsystem())),
-      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface()),
-      mApplicableStrategy(mDefaultApplicableStrategy)
+      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
 {
     mId = static_cast<audio_stream_type_t>(context.getItemAsInteger(MappingKeyIdentifier));
 
@@ -40,17 +35,17 @@
     mPolicyPluginInterface->addStream(getFormattedMappingValue(), mId);
 }
 
-bool Stream::receiveFromHW(string & /*error*/)
-{
-    blackboardWrite(&mApplicableStrategy, sizeof(mApplicableStrategy));
-    return true;
-}
-
 bool Stream::sendToHW(string & /*error*/)
 {
-    uint32_t applicableStrategy;
-    blackboardRead(&applicableStrategy, sizeof(applicableStrategy));
-    mApplicableStrategy = applicableStrategy;
-    return mPolicyPluginInterface->setStrategyForStream(mId,
-                                              static_cast<routing_strategy>(mApplicableStrategy));
+    Applicable params;
+    blackboardRead(&params, sizeof(params));
+
+    mPolicyPluginInterface->setStrategyForStream(mId,
+                                                 static_cast<routing_strategy>(params.strategy));
+
+    mPolicyPluginInterface->setVolumeProfileForStream(mId,
+                                                      static_cast<audio_stream_type_t>(params.volumeProfile));
+
+    return true;
 }
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
index 7d90c36..4a875db 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
@@ -16,7 +16,7 @@
 
 #pragma once
 
-#include "FormattedSubsystemObject.h"
+#include "SubsystemObject.h"
 #include "InstanceConfigurableElement.h"
 #include "MappingContext.h"
 #include <AudioPolicyPluginInterface.h>
@@ -24,15 +24,22 @@
 
 class PolicySubsystem;
 
-class Stream : public CFormattedSubsystemObject
+class Stream : public CSubsystemObject
 {
+private:
+    struct Applicable
+    {
+        uint32_t strategy; /**< applicable strategy for this stream. */
+        uint32_t volumeProfile; /**< applicable volume profile for this stream. */
+    } __attribute__((packed));
+
 public:
     Stream(const std::string &mappingValue,
-             CInstanceConfigurableElement *instanceConfigurableElement,
-             const CMappingContext &context);
+           CInstanceConfigurableElement *instanceConfigurableElement,
+           const CMappingContext &context,
+           core::log::Logger& logger);
 
 protected:
-    virtual bool receiveFromHW(std::string &error);
     virtual bool sendToHW(std::string &error);
 
 private:
@@ -42,8 +49,5 @@
      * Interface to communicate with Audio Policy Engine.
      */
     android::AudioPolicyPluginInterface *mPolicyPluginInterface;
-
     audio_stream_type_t mId; /**< stream type identifier to link with audio.h. */
-    uint32_t mApplicableStrategy; /**< applicable strategy for this stream. */
-    static const uint32_t mDefaultApplicableStrategy = 0; /**< default strategy. */
 };
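
Note on the Applicable block above: blackboardRead() copies raw bytes out of
the parameter-framework blackboard, so the struct has to match the parameter
block declared in the Policy structure files field for field, with no padding.
A small illustrative check of that invariant (the field layout is taken from
the diff; the static_assert itself is not part of the patch):

    #include <cstdint>

    struct Applicable
    {
        uint32_t strategy;      // first 32-bit parameter of the block
        uint32_t volumeProfile; // second 32-bit parameter, read back to back
    } __attribute__((packed));

    // Any compiler-inserted padding would shift volumeProfile away from the
    // bytes the blackboard actually holds, hence __attribute__((packed)).
    static_assert(sizeof(Applicable) == 2 * sizeof(uint32_t),
                  "Applicable must match the blackboard image byte for byte");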
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
index 1916b9b..78199f8 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
@@ -23,16 +23,16 @@
 
 Usage::Usage(const string &mappingValue,
                    CInstanceConfigurableElement *instanceConfigurableElement,
-                   const CMappingContext &context)
+                   const CMappingContext &context, core::log::Logger &logger)
     : CFormattedSubsystemObject(instanceConfigurableElement,
+                                logger,
                                 mappingValue,
                                 MappingKeyAmend1,
                                 (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
                                 context),
       mPolicySubsystem(static_cast<const PolicySubsystem *>(
                            instanceConfigurableElement->getBelongingSubsystem())),
-      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface()),
-      mApplicableStrategy(mDefaultApplicableStrategy)
+      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
 {
     mId = static_cast<audio_usage_t>(context.getItemAsInteger(MappingKeyIdentifier));
 
@@ -40,17 +40,10 @@
     mPolicyPluginInterface->addUsage(getFormattedMappingValue(), mId);
 }
 
-bool Usage::receiveFromHW(string & /*error*/)
-{
-    blackboardWrite(&mApplicableStrategy, sizeof(mApplicableStrategy));
-    return true;
-}
-
 bool Usage::sendToHW(string & /*error*/)
 {
     uint32_t applicableStrategy;
     blackboardRead(&applicableStrategy, sizeof(applicableStrategy));
-    mApplicableStrategy = applicableStrategy;
     return mPolicyPluginInterface->setStrategyForUsage(mId,
-                                              static_cast<routing_strategy>(mApplicableStrategy));
+                                              static_cast<routing_strategy>(applicableStrategy));
 }
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
index 8e9b638..860204f 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
@@ -28,11 +28,11 @@
 {
 public:
     Usage(const std::string &mappingValue,
-             CInstanceConfigurableElement *instanceConfigurableElement,
-             const CMappingContext &context);
+          CInstanceConfigurableElement *instanceConfigurableElement,
+          const CMappingContext &context,
+          core::log::Logger& logger);
 
 protected:
-    virtual bool receiveFromHW(std::string &error);
     virtual bool sendToHW(std::string &error);
 
 private:
@@ -44,6 +44,4 @@
     android::AudioPolicyPluginInterface *mPolicyPluginInterface;
 
     audio_usage_t mId; /**< usage identifier to link with audio.h. */
-    uint32_t mApplicableStrategy; /**< applicable strategy for this usage. */
-    static const uint32_t mDefaultApplicableStrategy = 0; /**< default strategy. */
 };
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.cpp
deleted file mode 100755
index 5c155c8..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "VolumeProfile.h"
-#include "PolicyMappingKeys.h"
-#include "PolicySubsystem.h"
-#include "ParameterBlockType.h"
-#include <Volume.h>
-#include <math.h>
-
-using std::string;
-
-VolumeProfile::VolumeProfile(const string &mappingValue,
-                             CInstanceConfigurableElement *instanceConfigurableElement,
-                             const CMappingContext &context)
-    : CFormattedSubsystemObject(instanceConfigurableElement,
-                                mappingValue,
-                                MappingKeyAmend1,
-                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
-                                context),
-      mPolicySubsystem(static_cast<const PolicySubsystem *>(
-                           instanceConfigurableElement->getBelongingSubsystem())),
-      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
-{
-    uint32_t categoryKey = context.getItemAsInteger(MappingKeyCategory);
-    if (categoryKey >= Volume::DEVICE_CATEGORY_CNT) {
-        mCategory = Volume::DEVICE_CATEGORY_SPEAKER;
-    } else {
-        mCategory = static_cast<Volume::device_category>(categoryKey);
-    }
-    mId = static_cast<audio_stream_type_t>(context.getItemAsInteger(MappingKeyIdentifier));
-
-    // (no exception support, defer the error)
-    if (instanceConfigurableElement->getType() != CInstanceConfigurableElement::EParameterBlock) {
-        return;
-    }
-    // Get actual element type
-    const CParameterBlockType *parameterType = static_cast<const CParameterBlockType *>(
-                instanceConfigurableElement->getTypeElement());
-    mPoints = parameterType->getArrayLength();
-}
-
-bool VolumeProfile::receiveFromHW(string & /*error*/)
-{
-    return true;
-}
-
-bool VolumeProfile::sendToHW(string & /*error*/)
-{
-    Point points[mPoints];
-    blackboardRead(&points, sizeof(Point) * mPoints);
-
-    VolumeCurvePoints pointsVector;
-    for (size_t i = 0; i < mPoints; i++) {
-        VolumeCurvePoint curvePoint;
-        curvePoint.mIndex = points[i].index;
-        curvePoint.mDBAttenuation = static_cast<float>(points[i].dbAttenuation) /
-                (1UL << gFractional);
-        pointsVector.push_back(curvePoint);
-    }
-    return mPolicyPluginInterface->setVolumeProfileForStream(mId, mCategory, pointsVector);
-}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.h
deleted file mode 100755
index a00ae84..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "FormattedSubsystemObject.h"
-#include "InstanceConfigurableElement.h"
-#include "MappingContext.h"
-#include <Volume.h>
-#include <AudioPolicyPluginInterface.h>
-#include <string>
-
-class PolicySubsystem;
-
-class VolumeProfile : public CFormattedSubsystemObject
-{
-private:
-    struct Point
-    {
-        int index;
-        /** Volume is using FixedPointParameter until float parameters are available. */
-        int16_t dbAttenuation;
-    } __attribute__((packed));
-
-public:
-    VolumeProfile(const std::string &mappingValue,
-                  CInstanceConfigurableElement *instanceConfigurableElement,
-                  const CMappingContext &context);
-
-protected:
-    virtual bool receiveFromHW(std::string &error);
-    virtual bool sendToHW(std::string &error);
-
-private:
-    const PolicySubsystem *mPolicySubsystem; /**< Route subsytem plugin. */
-
-    /**
-     * Interface to communicate with Audio Policy Engine.
-     */
-    android::AudioPolicyPluginInterface *mPolicyPluginInterface;
-
-    /**
-     * volume profile identifier,  which is in fact a stream type to link with audio.h.
-     */
-    audio_stream_type_t mId;
-
-    size_t mPoints;
-    Volume::device_category mCategory;
-
-    static const uint32_t gFractional = 8; /**< Beware to align with the structure. */
-};
diff --git a/services/audiopolicy/engineconfigurable/src/Collection.h b/services/audiopolicy/engineconfigurable/src/Collection.h
index 8f17b15..b72ded8 100755
--- a/services/audiopolicy/engineconfigurable/src/Collection.h
+++ b/services/audiopolicy/engineconfigurable/src/Collection.h
@@ -47,6 +47,7 @@
 class Collection : public std::map<Key, Element<Key> *>
 {
 private:
+    typedef std::map<Key, Element<Key> *> Base;
     typedef Element<Key> T;
     typedef typename std::map<Key, T *>::iterator CollectionIterator;
     typedef typename std::map<Key, T *>::const_iterator CollectionConstIterator;
@@ -127,7 +128,7 @@
         for (it = (*this).begin(); it != (*this).end(); ++it) {
             delete it->second;
         }
-        (*this).clear();
+        Base::clear();
     }
 
 private:
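
Note on Base::clear(): the hunk does not show the name of the enclosing
method, but if it is Collection's own clear() (which the change suggests), the
previous unqualified (*this).clear() would resolve back to that same method
and recurse forever; qualifying the call selects std::map::clear(). A
self-contained sketch of the pattern, under that assumption:

    #include <map>

    template <typename Key>
    struct Element { /* owned payload, elided */ };

    template <typename Key>
    class Collection : public std::map<Key, Element<Key> *>
    {
    private:
        typedef std::map<Key, Element<Key> *> Base;

    public:
        void clear()
        {
            for (typename Base::iterator it = this->begin(); it != this->end(); ++it) {
                delete it->second; // the collection owns its elements
            }
            // An unqualified clear() here would re-enter this very method;
            // Base::clear() unambiguously empties the underlying map.
            Base::clear();
        }
    };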
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 733cdf6..0d18ffa 100755
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -106,27 +106,13 @@
 
 status_t Engine::initCheck()
 {
-    if (mPolicyParameterMgr != NULL && mPolicyParameterMgr->start() != NO_ERROR) {
+    if (mPolicyParameterMgr == NULL || mPolicyParameterMgr->start() != NO_ERROR) {
         ALOGE("%s: could not start Policy PFW", __FUNCTION__);
-        delete mPolicyParameterMgr;
-        mPolicyParameterMgr = NULL;
         return NO_INIT;
     }
     return (mApmObserver != NULL)? NO_ERROR : NO_INIT;
 }
 
-bool Engine::setVolumeProfileForStream(const audio_stream_type_t &streamType,
-                                       Volume::device_category deviceCategory,
-                                       const VolumeCurvePoints &points)
-{
-    Stream *stream = getFromCollection<audio_stream_type_t>(streamType);
-    if (stream == NULL) {
-        ALOGE("%s: stream %d not found", __FUNCTION__, streamType);
-        return false;
-    }
-    return stream->setVolumeProfile(deviceCategory, points) == NO_ERROR;
-}
-
 template <typename Key>
 Element<Key> *Engine::getFromCollection(const Key &key) const
 {
@@ -154,13 +140,6 @@
 
 routing_strategy Engine::ManagerInterfaceImpl::getStrategyForUsage(audio_usage_t usage)
 {
-    const SwAudioOutputCollection &outputs = mPolicyEngine->mApmObserver->getOutputs();
-
-    if (usage == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY &&
-            (outputs.isStreamActive(AUDIO_STREAM_RING) ||
-             outputs.isStreamActive(AUDIO_STREAM_ALARM))) {
-        return STRATEGY_SONIFICATION;
-    }
     return mPolicyEngine->getPropertyForKey<routing_strategy, audio_usage_t>(usage);
 }
 
@@ -185,9 +164,29 @@
             outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
         return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(STRATEGY_MEDIA);
     }
+    if (strategy == STRATEGY_ACCESSIBILITY &&
+        (outputs.isStreamActive(AUDIO_STREAM_RING) || outputs.isStreamActive(AUDIO_STREAM_ALARM))) {
+        // Do not route accessibility prompts to a digital output currently configured with a
+        // compressed format, as they would likely not be mixed and would be dropped.
+        // The Device for Sonification conf file marks HDMI, SPDIF and HDMI ARC as unreachable.
+        return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(
+                    STRATEGY_SONIFICATION);
+    }
     return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(strategy);
 }
 
+bool Engine::PluginInterfaceImpl::setVolumeProfileForStream(const audio_stream_type_t &stream,
+                                                            const audio_stream_type_t &profile)
+{
+    if (mPolicyEngine->setPropertyForKey<audio_stream_type_t, audio_stream_type_t>(stream,
+                                                                                   profile)) {
+        mPolicyEngine->mApmObserver->getVolumeCurves().switchVolumeCurve(profile, stream);
+        return true;
+    }
+    return false;
+}
+
 template <typename Property, typename Key>
 bool Engine::setPropertyForKey(const Property &property, const Key &key)
 {
@@ -199,32 +198,6 @@
     return element->template set<Property>(property) == NO_ERROR;
 }
 
-float Engine::volIndexToDb(Volume::device_category category,
-                             audio_stream_type_t streamType,
-                             int indexInUi)
-{
-    Stream *stream = getFromCollection<audio_stream_type_t>(streamType);
-    if (stream == NULL) {
-        ALOGE("%s: Element indexed by key=%d not found", __FUNCTION__, streamType);
-        return 1.0f;
-    }
-    return stream->volIndexToDb(category, indexInUi);
-}
-
-status_t Engine::initStreamVolume(audio_stream_type_t streamType,
-                                           int indexMin, int indexMax)
-{
-    Stream *stream = getFromCollection<audio_stream_type_t>(streamType);
-    if (stream == NULL) {
-        ALOGE("%s: Stream Type %d not found", __FUNCTION__, streamType);
-        return BAD_TYPE;
-    }
-    mApmObserver->getStreamDescriptors().setVolumeIndexMin(streamType, indexMin);
-    mApmObserver->getStreamDescriptors().setVolumeIndexMax(streamType, indexMax);
-
-    return stream->initVolume(indexMin, indexMax);
-}
-
 status_t Engine::setPhoneState(audio_mode_t mode)
 {
     return mPolicyParameterMgr->setPhoneState(mode);
@@ -246,10 +219,17 @@
     return mPolicyParameterMgr->getForceUse(usage);
 }
 
-status_t Engine::setDeviceConnectionState(audio_devices_t devices, audio_policy_dev_state_t state,
-                                          const char *deviceAddress)
+status_t Engine::setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
+                                          audio_policy_dev_state_t /*state*/)
 {
-    return mPolicyParameterMgr->setDeviceConnectionState(devices, state, deviceAddress);
+    if (audio_is_output_device(devDesc->type())) {
+        return mPolicyParameterMgr->setAvailableOutputDevices(
+                    mApmObserver->getAvailableOutputDevices().types());
+    } else if (audio_is_input_device(devDesc->type())) {
+        return mPolicyParameterMgr->setAvailableInputDevices(
+                    mApmObserver->getAvailableInputDevices().types());
+    }
+    return BAD_TYPE;
 }
 
 template <>
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index 6fa7a13..bc5e035 100755
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -86,22 +86,7 @@
         virtual android::status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
                                                            audio_policy_dev_state_t state)
         {
-            return mPolicyEngine->setDeviceConnectionState(devDesc->type(), state,
-                                                           devDesc->mAddress);
-        }
-        virtual status_t initStreamVolume(audio_stream_type_t stream,
-                                                   int indexMin, int indexMax)
-        {
-            return mPolicyEngine->initStreamVolume(stream, indexMin, indexMax);
-        }
-
-        virtual void initializeVolumeCurves(bool /*isSpeakerDrcEnabled*/) {}
-
-        virtual float volIndexToDb(Volume::device_category deviceCategory,
-                                     audio_stream_type_t stream,
-                                     int indexInUi)
-        {
-            return mPolicyEngine->volIndexToDb(deviceCategory, stream, indexInUi);
+            return mPolicyEngine->setDeviceConnectionState(devDesc, state);
         }
 
     private:
@@ -142,11 +127,7 @@
                                                                                            stream);
         }
         virtual bool setVolumeProfileForStream(const audio_stream_type_t &stream,
-                                               Volume::device_category deviceCategory,
-                                               const VolumeCurvePoints &points)
-        {
-            return mPolicyEngine->setVolumeProfileForStream(stream, deviceCategory, points);
-        }
+                                               const audio_stream_type_t &volumeProfile);
 
         virtual bool setStrategyForUsage(const audio_usage_t &usage, routing_strategy strategy)
         {
@@ -172,7 +153,7 @@
     void setObserver(AudioPolicyManagerObserver *observer);
 
     bool setVolumeProfileForStream(const audio_stream_type_t &stream,
-                                   Volume::device_category deviceCategory,
+                                   device_category deviceCategory,
                                    const VolumeCurvePoints &points);
 
     status_t initCheck();
@@ -180,14 +161,8 @@
     audio_mode_t getPhoneState() const;
     status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const;
-    status_t setDeviceConnectionState(audio_devices_t devices, audio_policy_dev_state_t state,
-                                      const char *deviceAddress);
-
-    float volIndexToDb(Volume::device_category category,
-                       audio_stream_type_t stream,
-                       int indexInUi);
-    status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
-
+    status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
+                                      audio_policy_dev_state_t state);
     StrategyCollection mStrategyCollection; /**< Strategies indexed by their enum id. */
     StreamCollection mStreamCollection; /**< Streams indexed by their enum id.  */
     UsageCollection mUsageCollection; /**< Usages indexed by their enum id. */
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.cpp b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
index 9ff1538..ae39fef 100755
--- a/services/audiopolicy/engineconfigurable/src/InputSource.cpp
+++ b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
@@ -36,8 +36,7 @@
 
 /**
 * Set the device associated to this source.
-* It checks if the input device is valid but allows to set a NONE device
-* (i.e. only the IN BIT is set).
+* It checks if the input device is valid.
 *
 * @param[in] devices selected for the given input source.
 * @tparam audio_devices_t: Applicable input device for this input source.
@@ -47,7 +46,10 @@
 template <>
 status_t Element<audio_source_t>::set(audio_devices_t devices)
 {
-    if (!audio_is_input_device(devices) && devices != AUDIO_DEVICE_BIT_IN) {
+    if (devices != AUDIO_DEVICE_NONE) {
+        devices |= AUDIO_DEVICE_BIT_IN;
+    }
+    if (!audio_is_input_device(devices)) {
         ALOGE("%s: trying to set an invalid device 0x%X for input source %s",
               __FUNCTION__, devices, getName().c_str());
         return BAD_VALUE;
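
Note on the reworked check: the policy blackboard now carries input device
masks without the direction bit (see the & ~AUDIO_DEVICE_BIT_IN in the
ParameterManagerWrapper change below), so the setter restores the bit before
validating. A worked sketch with constants from system/audio.h:

    audio_devices_t devices = 0x4;      // BUILTIN_MIC payload as stored on the
                                        // blackboard, direction bit stripped
    if (devices != AUDIO_DEVICE_NONE) {
        devices |= AUDIO_DEVICE_BIT_IN; // 0x80000004 == AUDIO_DEVICE_IN_BUILTIN_MIC
    }
    // audio_is_input_device(0x80000004) is true, so the device is accepted.
    // For AUDIO_DEVICE_NONE the bit is never added and audio_is_input_device(0)
    // returns false, so NONE is now rejected, matching the updated doxygen.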
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.cpp b/services/audiopolicy/engineconfigurable/src/Strategy.cpp
index 847443a..a539914 100755
--- a/services/audiopolicy/engineconfigurable/src/Strategy.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Strategy.cpp
@@ -37,7 +37,7 @@
 
 /**
  * Set the device associated to this strategy.
- * It checks if the output device is valid but allows to set a NONE device
+ * It checks if the output device is valid.
  *
  * @param[in] devices selected for the given strategy.
  *
@@ -46,7 +46,7 @@
 template <>
 status_t Element<routing_strategy>::set<audio_devices_t>(audio_devices_t devices)
 {
-    if (!audio_is_output_devices(devices) && devices != AUDIO_DEVICE_NONE) {
+    if (!audio_is_output_devices(devices) || devices == AUDIO_DEVICE_NONE) {
         ALOGE("%s: trying to set an invalid device 0x%X for strategy %s",
               __FUNCTION__, devices, getName().c_str());
         return BAD_VALUE;
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.cpp b/services/audiopolicy/engineconfigurable/src/Stream.cpp
index bea2c19..0ed364f 100755
--- a/services/audiopolicy/engineconfigurable/src/Stream.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Stream.cpp
@@ -62,92 +62,22 @@
     return mApplicableStrategy;
 }
 
-status_t Element<audio_stream_type_t>::setVolumeProfile(Volume::device_category category,
-                                                        const VolumeCurvePoints &points)
+template <>
+status_t Element<audio_stream_type_t>::set<audio_stream_type_t>(audio_stream_type_t volumeProfile)
 {
-    ALOGD("%s: adding volume profile for %s for device category %d, points nb =%d", __FUNCTION__,
-          getName().c_str(), category, points.size());
-    mVolumeProfiles[category] = points;
-
-    for (size_t i = 0; i < points.size(); i++) {
-        ALOGV("%s: %s cat=%d curve index =%d Index=%d dBAttenuation=%f",
-              __FUNCTION__, getName().c_str(), category, i, points[i].mIndex,
-             points[i].mDBAttenuation);
-    }
-    return NO_ERROR;
-}
-
-status_t Element<audio_stream_type_t>::initVolume(int indexMin, int indexMax)
-{
-    ALOGV("initStreamVolume() stream %s, min %d, max %d", getName().c_str(), indexMin, indexMax);
-    if (indexMin < 0 || indexMin >= indexMax) {
-        ALOGW("initStreamVolume() invalid index limits for stream %s, min %d, max %d",
-              getName().c_str(), indexMin, indexMax);
+    if (volumeProfile >= AUDIO_STREAM_CNT) {
         return BAD_VALUE;
     }
-    mIndexMin = indexMin;
-    mIndexMax = indexMax;
-
+    mVolumeProfile = volumeProfile;
+    ALOGD("%s: 0x%X for Stream %s", __FUNCTION__, mVolumeProfile, getName().c_str());
     return NO_ERROR;
 }
 
-float Element<audio_stream_type_t>::volIndexToDb(Volume::device_category deviceCategory,
-                                                   int indexInUi)
+template <>
+audio_stream_type_t Element<audio_stream_type_t>::get<audio_stream_type_t>() const
 {
-    VolumeProfileConstIterator it = mVolumeProfiles.find(deviceCategory);
-    if (it == mVolumeProfiles.end()) {
-        ALOGE("%s: device category %d not found for stream %s", __FUNCTION__, deviceCategory,
-              getName().c_str());
-        return 1.0f;
-    }
-    const VolumeCurvePoints curve = mVolumeProfiles[deviceCategory];
-    if (curve.size() != Volume::VOLCNT) {
-        ALOGE("%s: invalid profile for category %d and for stream %s", __FUNCTION__, deviceCategory,
-              getName().c_str());
-        return 1.0f;
-    }
-
-    // the volume index in the UI is relative to the min and max volume indices for this stream type
-    int nbSteps = 1 + curve[Volume::VOLMAX].mIndex -
-            curve[Volume::VOLMIN].mIndex;
-
-    if (mIndexMax - mIndexMin == 0) {
-        ALOGE("%s: Invalid volume indexes Min=Max=%d", __FUNCTION__, mIndexMin);
-        return 1.0f;
-    }
-    int volIdx = (nbSteps * (indexInUi - mIndexMin)) /
-            (mIndexMax - mIndexMin);
-
-    // find what part of the curve this index volume belongs to, or if it's out of bounds
-    int segment = 0;
-    if (volIdx < curve[Volume::VOLMIN].mIndex) {         // out of bounds
-        return 0.0f;
-    } else if (volIdx < curve[Volume::VOLKNEE1].mIndex) {
-        segment = 0;
-    } else if (volIdx < curve[Volume::VOLKNEE2].mIndex) {
-        segment = 1;
-    } else if (volIdx <= curve[Volume::VOLMAX].mIndex) {
-        segment = 2;
-    } else {                                                               // out of bounds
-        return 1.0f;
-    }
-
-    // linear interpolation in the attenuation table in dB
-    float decibels = curve[segment].mDBAttenuation +
-            ((float)(volIdx - curve[segment].mIndex)) *
-                ( (curve[segment+1].mDBAttenuation -
-                        curve[segment].mDBAttenuation) /
-                    ((float)(curve[segment+1].mIndex -
-                            curve[segment].mIndex)) );
-
-    ALOGV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
-            curve[segment].mIndex, volIdx,
-            curve[segment+1].mIndex,
-            curve[segment].mDBAttenuation,
-            decibels,
-            curve[segment+1].mDBAttenuation);
-
-    return decibels;
+    ALOGV("%s: 0x%X for Stream %s", __FUNCTION__, mVolumeProfile, getName().c_str());
+    return mVolumeProfile;
 }
 
 } // namespace audio_policy
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.h b/services/audiopolicy/engineconfigurable/src/Stream.h
index 8c39dc6..6902003 100755
--- a/services/audiopolicy/engineconfigurable/src/Stream.h
+++ b/services/audiopolicy/engineconfigurable/src/Stream.h
@@ -18,7 +18,6 @@
 
 #include "Element.h"
 #include "EngineDefinition.h"
-#include <Volume.h>
 #include <RoutingStrategy.h>
 #include <map>
 
@@ -32,17 +31,10 @@
 template <>
 class Element<audio_stream_type_t>
 {
-private:
-    typedef std::map<Volume::device_category, VolumeCurvePoints> VolumeProfiles;
-    typedef VolumeProfiles::iterator VolumeProfileIterator;
-    typedef VolumeProfiles::const_iterator VolumeProfileConstIterator;
-
 public:
     Element(const std::string &name)
         : mName(name),
-          mApplicableStrategy(STRATEGY_MEDIA),
-          mIndexMin(0),
-          mIndexMax(1)
+          mApplicableStrategy(STRATEGY_MEDIA)
     {}
     ~Element() {}
 
@@ -79,12 +71,6 @@
     template <typename Property>
     status_t set(Property property);
 
-    status_t setVolumeProfile(Volume::device_category category, const VolumeCurvePoints &points);
-
-    float volIndexToDb(Volume::device_category deviceCategory, int indexInUi);
-
-    status_t initVolume(int indexMin, int indexMax);
-
 private:
     /* Copy facilities are put private to disable copy. */
     Element(const Element &object);
@@ -95,16 +81,7 @@
 
     routing_strategy mApplicableStrategy; /**< Applicable strategy for this stream. */
 
-    /**
-     * Collection of volume profiles indexed by the stream type.
-     * Volume is the only reason why the stream profile was not removed from policy when introducing
-     * attributes.
-     */
-    VolumeProfiles mVolumeProfiles;
-
-    int mIndexMin;
-
-    int mIndexMax;
+    audio_stream_type_t mVolumeProfile; /**< volume profile followed by this stream. */
 };
 
 typedef Element<audio_stream_type_t> Stream;
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index 096f913..f4283a8 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -18,6 +18,8 @@
 LOCAL_STATIC_LIBRARIES := \
     libmedia_helper \
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
 LOCAL_MODULE:= libaudiopolicypfwwrapper
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
 
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index cfe49d4..6872e52 100755
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -15,6 +15,7 @@
  */
 
 #define LOG_TAG "APM::AudioPolicyEngine/PFWWrapper"
+//#define LOG_NDEBUG 0
 
 #include "ParameterManagerWrapper.h"
 #include "audio_policy_criteria_conf.h"
@@ -45,15 +46,13 @@
 public:
     ParameterMgrPlatformConnectorLogger() {}
 
-    virtual void log(bool isWarning, const string &log)
+    virtual void info(const string &log)
     {
-        const static string format("policy-parameter-manager: ");
-
-        if (isWarning) {
-            ALOGW("%s %s", format.c_str(), log.c_str());
-        } else {
-            ALOGD("%s %s", format.c_str(), log.c_str());
-        }
+        ALOGD("policy-parameter-manager: %s", log.c_str());
+    }
+    virtual void warning(const string &log)
+    {
+        ALOGW("policy-parameter-manager: %s", log.c_str());
     }
 };
 
@@ -88,7 +87,6 @@
               __FUNCTION__, gAudioPolicyCriteriaVendorConfFilePath,
               gAudioPolicyCriteriaConfFilePath);
     }
-    ALOGD("%s: ParameterManagerWrapper instantiated!", __FUNCTION__);
 }
 
 ParameterManagerWrapper::~ParameterManagerWrapper()
@@ -118,7 +116,7 @@
 void ParameterManagerWrapper::addCriterionType(const string &typeName, bool isInclusive)
 {
     ALOG_ASSERT(mPolicyCriterionTypes.find(typeName) == mPolicyCriterionTypes.end(),
-                      "CriterionType " << typeName << " already added");
+                      "CriterionType %s already added", typeName.c_str());
     ALOGD("%s: Adding new criterionType %s", __FUNCTION__, typeName.c_str());
 
     mPolicyCriterionTypes[typeName] = mPfwConnector->createSelectionCriterionType(isInclusive);
@@ -130,11 +128,12 @@
     const string &literalValue)
 {
     ALOG_ASSERT(mPolicyCriterionTypes.find(typeName) != mPolicyCriterionTypes.end(),
-                      "CriterionType " << typeName.c_str() << "not found");
+                      "CriterionType %s not found", typeName.c_str());
     ALOGV("%s: Adding new value pair (%d,%s) for criterionType %s", __FUNCTION__,
           numericValue, literalValue.c_str(), typeName.c_str());
     ISelectionCriterionTypeInterface *criterionType = mPolicyCriterionTypes[typeName];
-    criterionType->addValuePair(numericValue, literalValue.c_str());
+    std::string error;
+    criterionType->addValuePair(numericValue, literalValue, error);
 }
 
 void ParameterManagerWrapper::loadCriterionType(cnode *root, bool isInclusive)
@@ -224,8 +223,8 @@
 {
     parameterManagerElementSupported<T>();
     typename std::map<string, T *>::iterator it = elementsMap.find(name);
-    ALOG_ASSERT(it != elementsMap.end(), "Element " << name << " not found");
-    return it->second;
+    ALOG_ASSERT(it != elementsMap.end(), "Element %s not found", name.c_str());
+    return it != elementsMap.end() ? it->second : NULL;
 }
 
 template <typename T>
@@ -233,8 +232,8 @@
 {
     parameterManagerElementSupported<T>();
     typename std::map<string, T *>::const_iterator it = elementsMap.find(name);
-    ALOG_ASSERT(it != elementsMap.end(), "Element " << name << " not found");
-    return it->second;
+    ALOG_ASSERT(it != elementsMap.end(), "Element %s not found", name.c_str());
+    return it != elementsMap.end() ? it->second : NULL;
 }
 
 void ParameterManagerWrapper::loadCriteria(cnode *root)
@@ -254,8 +253,8 @@
 void ParameterManagerWrapper::addCriterion(const string &name, const string &typeName,
                               const string &defaultLiteralValue)
 {
-    ALOG_ASSERT(mPolicyCriteria.find(criterionName) == mPolicyCriteria.end(),
-                "Route Criterion " << criterionName << " already added");
+    ALOG_ASSERT(mPolicyCriteria.find(name) == mPolicyCriteria.end(),
+                "Route Criterion %s already added", name.c_str());
 
     ISelectionCriterionTypeInterface *criterionType =
             getElement<ISelectionCriterionTypeInterface>(typeName, mPolicyCriterionTypes);
@@ -278,7 +277,7 @@
     const char *criterionName = root->name;
 
     ALOG_ASSERT(mPolicyCriteria.find(criterionName) == mPolicyCriteria.end(),
-                      "Criterion " << criterionName << " already added");
+                      "Criterion %s already added", criterionName);
 
     string paramKeyName = "";
     string path = "";
@@ -335,7 +334,12 @@
 
 status_t ParameterManagerWrapper::setPhoneState(audio_mode_t mode)
 {
-    ISelectionCriterionInterface *criterion = mPolicyCriteria[gPhoneStateCriterionTag];
+    ISelectionCriterionInterface *criterion =
+            getElement<ISelectionCriterionInterface>(gPhoneStateCriterionTag, mPolicyCriteria);
+    if (criterion == NULL) {
+        ALOGE("%s: no criterion found for %s", __FUNCTION__, gPhoneStateCriterionTag.c_str());
+        return BAD_VALUE;
+    }
     if (!isValueValidForCriterion(criterion, static_cast<int>(mode))) {
         return BAD_VALUE;
     }
@@ -348,6 +352,10 @@
 {
     const ISelectionCriterionInterface *criterion =
             getElement<ISelectionCriterionInterface>(gPhoneStateCriterionTag, mPolicyCriteria);
+    if (criterion == NULL) {
+        ALOGE("%s: no criterion found for %s", __FUNCTION__, gPhoneStateCriterionTag.c_str());
+        return AUDIO_MODE_NORMAL;
+    }
     return static_cast<audio_mode_t>(criterion->getCriterionState());
 }
 
@@ -359,7 +367,12 @@
         return BAD_VALUE;
     }
 
-    ISelectionCriterionInterface *criterion = mPolicyCriteria[gForceUseCriterionTag[usage]];
+    ISelectionCriterionInterface *criterion =
+            getElement<ISelectionCriterionInterface>(gForceUseCriterionTag[usage], mPolicyCriteria);
+    if (criterion == NULL) {
+        ALOGE("%s: no criterion found for %s", __FUNCTION__, gForceUseCriterionTag[usage].c_str());
+        return BAD_VALUE;
+    }
     if (!isValueValidForCriterion(criterion, static_cast<int>(config))) {
         return BAD_VALUE;
     }
@@ -376,6 +389,10 @@
     }
     const ISelectionCriterionInterface *criterion =
             getElement<ISelectionCriterionInterface>(gForceUseCriterionTag[usage], mPolicyCriteria);
+    if (criterion == NULL) {
+        ALOGE("%s: no criterion found for %s", __FUNCTION__, gForceUseCriterionTag[usage].c_str());
+        return AUDIO_POLICY_FORCE_NONE;
+    }
     return static_cast<audio_policy_forced_cfg_t>(criterion->getCriterionState());
 }
 
@@ -387,41 +404,28 @@
     return interface->getLiteralValue(valueToCheck, literalValue);
 }
 
-status_t ParameterManagerWrapper::setDeviceConnectionState(audio_devices_t devices,
-                                                           audio_policy_dev_state_t state,
-                                                           const char */*deviceAddres*/)
+status_t ParameterManagerWrapper::setAvailableInputDevices(audio_devices_t inputDevices)
 {
-    ISelectionCriterionInterface *criterion = NULL;
-
-    if (audio_is_output_devices(devices)) {
-        criterion = mPolicyCriteria[gOutputDeviceCriterionTag];
-    } else if (devices & AUDIO_DEVICE_BIT_IN) {
-        criterion = mPolicyCriteria[gInputDeviceCriterionTag];
-    } else {
-        return BAD_TYPE;
-    }
+    ISelectionCriterionInterface *criterion =
+            getElement<ISelectionCriterionInterface>(gInputDeviceCriterionTag, mPolicyCriteria);
     if (criterion == NULL) {
-        ALOGE("%s: no criterion found for devices", __FUNCTION__);
+        ALOGE("%s: no criterion found for %s", __FUNCTION__, gInputDeviceCriterionTag.c_str());
         return DEAD_OBJECT;
     }
+    criterion->setCriterionState(inputDevices & ~AUDIO_DEVICE_BIT_IN);
+    applyPlatformConfiguration();
+    return NO_ERROR;
+}
 
-    int32_t previousDevices = criterion->getCriterionState();
-    switch (state)
-    {
-    case AUDIO_POLICY_DEVICE_STATE_AVAILABLE:
-        criterion->setCriterionState(previousDevices |= devices);
-        break;
-
-    case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE:
-        if (devices & AUDIO_DEVICE_BIT_IN) {
-            devices &= ~AUDIO_DEVICE_BIT_IN;
-        }
-        criterion->setCriterionState(previousDevices &= ~devices);
-        break;
-
-    default:
-        return BAD_VALUE;
+status_t ParameterManagerWrapper::setAvailableOutputDevices(audio_devices_t outputDevices)
+{
+    ISelectionCriterionInterface *criterion =
+            getElement<ISelectionCriterionInterface>(gOutputDeviceCriterionTag, mPolicyCriteria);
+    if (criterion == NULL) {
+        ALOGE("%s: no criterion found for %s", __FUNCTION__, gOutputDeviceCriterionTag.c_str());
+        return DEAD_OBJECT;
     }
+    criterion->setCriterionState(outputDevices);
     applyPlatformConfiguration();
     return NO_ERROR;
 }
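
Note on the new setters: rather than editing criterion state bit by bit on
each connection event, the wrapper now republishes the full availability
masks, and the input mask drops AUDIO_DEVICE_BIT_IN because the value pairs
declared for the input-device criterion type in audio_policy_criteria.conf are
assumed to be defined without the direction bit. A worked sketch:

    // Available inputs as reported by the APM observer (direction bit set):
    //   AUDIO_DEVICE_IN_BUILTIN_MIC   0x80000004
    //   AUDIO_DEVICE_IN_WIRED_HEADSET 0x80000010
    audio_devices_t inputDevices = 0x80000004 | 0x80000010;   // 0x80000014
    uint32_t state = inputDevices & ~AUDIO_DEVICE_BIT_IN;     // 0x14
    // Publishing the raw mask would set a bit that matches no literal value
    // pair of the criterion type, so it is stripped before setCriterionState().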
diff --git a/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h b/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
index 58e7135..31b7e0f 100755
--- a/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
@@ -61,7 +61,8 @@
     [AUDIO_POLICY_FORCE_FOR_RECORD] =               "ForceUseForRecord",
     [AUDIO_POLICY_FORCE_FOR_DOCK] =                 "ForceUseForDock",
     [AUDIO_POLICY_FORCE_FOR_SYSTEM] =               "ForceUseForSystem",
-    [AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] =    "ForceUseForHdmiSystemAudio"
+    [AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] =    "ForceUseForHdmiSystemAudio",
+    [AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND] =     "ForceUseForEncodedSurround"
 };
 
 
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf b/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf
index 5b046a8..043d5a6 100755
--- a/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf
+++ b/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf
@@ -93,6 +93,11 @@
         # audio_policy_forced_config_t from system/audio.h
         #
         ForceUseForHdmiSystemAudioType  0:ForceNone,12:ForceHdmiSystemEnforced
+        #
+        # The values MUST be aligned with the definition of
+        # audio_policy_forced_cfg_t from system/audio_policy.h
+        #
+        ForceUseForEncodedSurroundType  0:ForceNone,13:ForceEncodedSurroundNever,14:ForceEncodedSurroundAlways
     }
 
     Criterion {
@@ -132,6 +137,10 @@
             Type            ForceUseForHdmiSystemAudioType
             Default         ForceNone
         }
+        ForceUseForEncodedSurround {
+            Type            ForceUseForEncodedSurroundType
+            Default         ForceNone
+        }
     }
 }
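
Note on the numeric literals: the 0/13/14 values in
ForceUseForEncodedSurroundType have to track the enum ordinals of
audio_policy_forced_cfg_t. As of this change the relevant definitions read:

    typedef enum {
        AUDIO_POLICY_FORCE_NONE                       = 0,
        /* values 1 through 11 elided */
        AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED = 12,
        AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER     = 13,
        AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS    = 14,
    } audio_policy_forced_cfg_t;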
 
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
index 3c5f2c0..4c1acfe 100755
--- a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -103,18 +103,22 @@
     audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const;
 
     /**
-     * Set the connection state of device(s).
-     * It will set the associated policy parameter framework criterion.
+     * Set the available input devices, i.e. set the associated policy parameter framework criterion.
      *
-     * @param[in] devices mask of devices for which the state has changed.
-     * @param[in] state of availability of this(these) device(s).
-     * @param[in] deviceAddress: the mask might not be enough, as it may represents a type of
-     *            device, so address of the device will help precise identification.
+     * @param[in] inputDevices mask of available input devices.
      *
      * @return NO_ERROR if devices criterion updated correctly, error code otherwise.
      */
-    status_t setDeviceConnectionState(audio_devices_t devices, audio_policy_dev_state_t state,
-                                      const char *deviceAddress);
+    status_t setAvailableInputDevices(audio_devices_t inputDevices);
+
+    /**
+     * Set the available output devices, i.e. set the associated policy parameter framework criterion.
+     *
+     * @param[in] outputDevices mask of available output devices.
+     *
+     * @return NO_ERROR if devices criterion updated correctly, error code otherwise.
+     */
+    status_t setAvailableOutputDevices(audio_devices_t outputDevices);
 
 private:
     /**
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
index 8d43b89..85d1822 100755
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -8,8 +8,6 @@
 LOCAL_SRC_FILES := \
     src/Engine.cpp \
     src/EngineInstance.cpp \
-    src/Gains.cpp \
-
 
 audio_policy_engine_includes_common := \
     $(LOCAL_PATH)/include \
@@ -31,12 +29,15 @@
     $(call include-path-for, bionic) \
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 LOCAL_MODULE := libaudiopolicyenginedefault
 LOCAL_MODULE_TAGS := optional
+
 LOCAL_STATIC_LIBRARIES := \
     libmedia_helper \
-    libaudiopolicycomponents
+    libaudiopolicycomponents \
+    libxml2
 
 LOCAL_SHARED_LIBRARIES += \
     libcutils \
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 0686414..d31429c 100755
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -25,7 +25,6 @@
 #endif
 
 #include "Engine.h"
-#include "Gains.h"
 #include <AudioPolicyManagerObserver.h>
 #include <AudioPort.h>
 #include <IOProfile.h>
@@ -63,57 +62,6 @@
     return (mApmObserver != NULL) ?  NO_ERROR : NO_INIT;
 }
 
-float Engine::volIndexToDb(Volume::device_category category, audio_stream_type_t streamType,
-                             int indexInUi)
-{
-    const StreamDescriptor &streamDesc = mApmObserver->getStreamDescriptors().valueAt(streamType);
-    return Gains::volIndexToDb(category, streamDesc, indexInUi);
-}
-
-
-status_t Engine::initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
-{
-    ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
-    if (indexMin < 0 || indexMin >= indexMax) {
-        ALOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d",
-              stream , indexMin, indexMax);
-        return BAD_VALUE;
-    }
-    mApmObserver->getStreamDescriptors().setVolumeIndexMin(stream, indexMin);
-    mApmObserver->getStreamDescriptors().setVolumeIndexMax(stream, indexMax);
-    return NO_ERROR;
-}
-
-void Engine::initializeVolumeCurves(bool isSpeakerDrcEnabled)
-{
-    StreamDescriptorCollection &streams = mApmObserver->getStreamDescriptors();
-
-    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
-        for (int j = 0; j < Volume::DEVICE_CATEGORY_CNT; j++) {
-            streams.setVolumeCurvePoint(static_cast<audio_stream_type_t>(i),
-                                         static_cast<Volume::device_category>(j),
-                                         Gains::sVolumeProfiles[i][j]);
-        }
-    }
-
-    // Check availability of DRC on speaker path: if available, override some of the speaker curves
-    if (isSpeakerDrcEnabled) {
-        streams.setVolumeCurvePoint(AUDIO_STREAM_SYSTEM, Volume::DEVICE_CATEGORY_SPEAKER,
-                Gains::sDefaultSystemVolumeCurveDrc);
-        streams.setVolumeCurvePoint(AUDIO_STREAM_RING, Volume::DEVICE_CATEGORY_SPEAKER,
-                Gains::sSpeakerSonificationVolumeCurveDrc);
-        streams.setVolumeCurvePoint(AUDIO_STREAM_ALARM, Volume::DEVICE_CATEGORY_SPEAKER,
-                Gains::sSpeakerSonificationVolumeCurveDrc);
-        streams.setVolumeCurvePoint(AUDIO_STREAM_NOTIFICATION, Volume::DEVICE_CATEGORY_SPEAKER,
-                Gains::sSpeakerSonificationVolumeCurveDrc);
-        streams.setVolumeCurvePoint(AUDIO_STREAM_MUSIC, Volume::DEVICE_CATEGORY_SPEAKER,
-                Gains::sSpeakerMediaVolumeCurveDrc);
-        streams.setVolumeCurvePoint(AUDIO_STREAM_ACCESSIBILITY, Volume::DEVICE_CATEGORY_SPEAKER,
-                Gains::sSpeakerMediaVolumeCurveDrc);
-    }
-}
-
-
 status_t Engine::setPhoneState(audio_mode_t state)
 {
     ALOGV("setPhoneState() state %d", state);
@@ -131,20 +79,14 @@
     // store previous phone state for management of sonification strategy below
     int oldState = mPhoneState;
     mPhoneState = state;
-    StreamDescriptorCollection &streams = mApmObserver->getStreamDescriptors();
-    // are we entering or starting a call
+
     if (!is_state_in_call(oldState) && is_state_in_call(state)) {
         ALOGV("  Entering call in setPhoneState()");
-        for (int j = 0; j < Volume::DEVICE_CATEGORY_CNT; j++) {
-            streams.setVolumeCurvePoint(AUDIO_STREAM_DTMF, static_cast<Volume::device_category>(j),
-                                         Gains::sVolumeProfiles[AUDIO_STREAM_VOICE_CALL][j]);
-        }
+        mApmObserver->getVolumeCurves().switchVolumeCurve(AUDIO_STREAM_VOICE_CALL,
+                                                          AUDIO_STREAM_DTMF);
     } else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
         ALOGV("  Exiting call in setPhoneState()");
-        for (int j = 0; j < Volume::DEVICE_CATEGORY_CNT; j++) {
-            streams.setVolumeCurvePoint(AUDIO_STREAM_DTMF, static_cast<Volume::device_category>(j),
-                                         Gains::sVolumeProfiles[AUDIO_STREAM_DTMF][j]);
-        }
+        mApmObserver->getVolumeCurves().restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
     }
     return NO_ERROR;
 }
@@ -199,13 +141,22 @@
     case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
         if (config != AUDIO_POLICY_FORCE_NONE &&
             config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
-            ALOGW("setForceUse() invalid config %d forHDMI_SYSTEM_AUDIO", config);
+            ALOGW("setForceUse() invalid config %d for HDMI_SYSTEM_AUDIO", config);
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND:
+        if (config != AUDIO_POLICY_FORCE_NONE &&
+                config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER &&
+                config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+            ALOGW("setForceUse() invalid config %d for ENCODED_SURROUND", config);
+            return BAD_VALUE;
         }
         mForceUse[usage] = config;
         break;
     default:
         ALOGW("setForceUse() invalid usage %d", usage);
-        break;
+        break; // TODO: return BAD_VALUE?
     }
     return NO_ERROR;
 }
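
Unlike the HDMI system audio case, the new ENCODED_SURROUND case rejects an unknown config with BAD_VALUE instead of warning and storing it anyway (the TODO above asks whether the default case should do the same). A toy sketch of that validate-then-store shape, with made-up names and status values:

    #include <cstdio>

    enum Config { CFG_NONE, CFG_SURROUND_NEVER, CFG_SURROUND_ALWAYS, CFG_OTHER };
    enum { OK = 0, BAD_VALUE = -22 };  // illustrative status values

    int setEncodedSurround(Config config, Config &stored) {
        if (config != CFG_NONE &&
                config != CFG_SURROUND_NEVER &&
                config != CFG_SURROUND_ALWAYS) {
            std::fprintf(stderr, "invalid config %d for ENCODED_SURROUND\n", config);
            return BAD_VALUE;  // reject: do not store an unknown value
        }
        stored = config;
        return OK;
    }
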
@@ -244,18 +195,9 @@
 
 routing_strategy Engine::getStrategyForUsage(audio_usage_t usage)
 {
-    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
-
     // usage to strategy mapping
     switch (usage) {
     case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
-                outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
-            return STRATEGY_SONIFICATION;
-        }
-        if (isInCall()) {
-            return STRATEGY_PHONE;
-        }
         return STRATEGY_ACCESSIBILITY;
 
     case AUDIO_USAGE_MEDIA:
@@ -289,11 +231,22 @@
 
 audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
 {
-    const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
-    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
+    DeviceVector availableOutputDevices = mApmObserver->getAvailableOutputDevices();
+    DeviceVector availableInputDevices = mApmObserver->getAvailableInputDevices();
 
     const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
 
+    return getDeviceForStrategyInt(strategy, availableOutputDevices,
+                                   availableInputDevices, outputs);
+}
+
+audio_devices_t Engine::getDeviceForStrategyInt(routing_strategy strategy,
+                                                DeviceVector availableOutputDevices,
+                                                DeviceVector availableInputDevices,
+                                                const SwAudioOutputCollection &outputs) const
+{
     uint32_t device = AUDIO_DEVICE_NONE;
     uint32_t availableOutputDevicesType = availableOutputDevices.types();
 
@@ -301,22 +254,20 @@
 
     case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
         device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
-        if (!device) {
-            ALOGE("getDeviceForStrategy() no device found for "\
-                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
-        }
         break;
 
     case STRATEGY_SONIFICATION_RESPECTFUL:
         if (isInCall()) {
-            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+            device = getDeviceForStrategyInt(
+                    STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
         } else if (outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
                 SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
             // while media is playing on a remote device, use the sonification behavior.
             // Note that we test this use case before testing if media is playing because
             //   the isStreamActive() method only informs about the activity of a stream, not
             //   if it's for local playback. Note also that we use the same delay between both tests
-            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+            device = getDeviceForStrategyInt(
+                    STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
             //user "safe" speaker if available instead of normal speaker to avoid triggering
             //other acoustic safety mechanisms for notification
             if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
@@ -324,12 +275,15 @@
                 device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
                 device &= ~AUDIO_DEVICE_OUT_SPEAKER;
             }
-        } else if (outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+        } else if (outputs.isStreamActive(
+                                AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
             // while media is playing (or has recently played), use the same device
-            device = getDeviceForStrategy(STRATEGY_MEDIA);
+            device = getDeviceForStrategyInt(
+                    STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs);
         } else {
             // when media is not playing anymore, fall back on the sonification behavior
-            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+            device = getDeviceForStrategyInt(
+                    STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
             //user "safe" speaker if available instead of normal speaker to avoid triggering
             //other acoustic safety mechanisms for notification
             if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
@@ -343,7 +297,8 @@
     case STRATEGY_DTMF:
         if (!isInCall()) {
             // when off call, DTMF strategy follows the same rules as MEDIA strategy
-            device = getDeviceForStrategy(STRATEGY_MEDIA);
+            device = getDeviceForStrategyInt(
+                    STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs);
             break;
         }
         // when in call, DTMF and PHONE strategies follow the same rules
@@ -370,8 +325,8 @@
                 availableOutputDevicesType = availPrimaryOutputDevices;
             }
         }
-        // for phone strategy, we first consider the forced use and then the available devices by order
-        // of priority
+        // for phone strategy, we first consider the forced use and then the available devices by
+        // order of priority
         switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
         case AUDIO_POLICY_FORCE_BT_SCO:
             if (!isInCall() || strategy != STRATEGY_DTMF) {
@@ -399,6 +354,8 @@
             if (device) break;
             device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
             if (device) break;
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
+            if (device) break;
             device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
             if (device) break;
             if (!isInCall()) {
@@ -412,11 +369,6 @@
                 if (device) break;
             }
             device = availableOutputDevicesType & AUDIO_DEVICE_OUT_EARPIECE;
-            if (device) break;
-            device = mApmObserver->getDefaultOutputDevice()->type();
-            if (device == AUDIO_DEVICE_NONE) {
-                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
-            }
             break;
 
         case AUDIO_POLICY_FORCE_SPEAKER:
@@ -440,14 +392,7 @@
                 device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
                 if (device) break;
             }
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
-            if (device) break;
             device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
-            if (device) break;
-            device = mApmObserver->getDefaultOutputDevice()->type();
-            if (device == AUDIO_DEVICE_NONE) {
-                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
-            }
             break;
         }
     break;
@@ -457,7 +402,8 @@
         // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
         // handleIncallSonification().
         if (isInCall()) {
-            device = getDeviceForStrategy(STRATEGY_PHONE);
+            device = getDeviceForStrategyInt(
+                    STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
             break;
         }
         // FALL THROUGH
@@ -471,14 +417,10 @@
         if ((strategy == STRATEGY_SONIFICATION) ||
                 (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
             device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
-            if (device == AUDIO_DEVICE_NONE) {
-                ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
-            }
         }
         // The second device used for sonification is the same as the device used by media strategy
         // FALL THROUGH
 
-    // FIXME: STRATEGY_ACCESSIBILITY and STRATEGY_REROUTING follow STRATEGY_MEDIA for now
     case STRATEGY_ACCESSIBILITY:
         if (strategy == STRATEGY_ACCESSIBILITY) {
             // do not route accessibility prompts to a digital output currently configured with a
@@ -492,20 +434,35 @@
                     availableOutputDevicesType = availableOutputDevices.types() & ~devices;
                 }
             }
+            availableOutputDevices =
+                    availableOutputDevices.getDevicesFromType(availableOutputDevicesType);
+            if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
+                    outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
+                return getDeviceForStrategyInt(
+                    STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
+            }
+            if (isInCall()) {
+                return getDeviceForStrategyInt(
+                        STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
+            }
         }
+        // For other cases, STRATEGY_ACCESSIBILITY behaves like STRATEGY_MEDIA
         // FALL THROUGH
 
+    // FIXME: STRATEGY_REROUTING follows STRATEGY_MEDIA for now
     case STRATEGY_REROUTING:
     case STRATEGY_MEDIA: {
         uint32_t device2 = AUDIO_DEVICE_NONE;
         if (strategy != STRATEGY_SONIFICATION) {
             // no sonification on remote submix (e.g. WFD)
-            if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
+            if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                                                 String8("0")) != 0) {
                 device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
             }
         }
         if (isInCall() && (strategy == STRATEGY_MEDIA)) {
-            device = getDeviceForStrategy(STRATEGY_PHONE);
+            device = getDeviceForStrategyInt(
+                    STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
             break;
         }
         if ((device2 == AUDIO_DEVICE_NONE) &&
@@ -571,12 +528,6 @@
                 AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
             device &= ~AUDIO_DEVICE_OUT_SPEAKER;
         }
-
-        if (device) break;
-        device = mApmObserver->getDefaultOutputDevice()->type();
-        if (device == AUDIO_DEVICE_NONE) {
-            ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
-        }
         } break;
 
     default:
@@ -584,6 +535,12 @@
         break;
     }
 
+    if (device == AUDIO_DEVICE_NONE) {
+        ALOGV("getDeviceForStrategy() no device found for strategy %d", strategy);
+        device = mApmObserver->getDefaultOutputDevice()->type();
+        ALOGE_IF(device == AUDIO_DEVICE_NONE,
+                 "getDeviceForStrategy() no default device defined");
+    }
     ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
     return device;
 }
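
The split above lets getDeviceForStrategy() stay a thin wrapper while getDeviceForStrategyInt() receives the candidate device vectors by value, so a strategy can narrow the set before recursing into another strategy (as STRATEGY_ACCESSIBILITY does before delegating to SONIFICATION or PHONE), and the default-device fallback collapses into this single spot instead of being repeated per strategy branch. A compressed illustration with invented strategies and bitmask device sets:

    #include <cstdint>

    using DeviceMask = uint32_t;
    enum { SPEAKER = 1u << 0, HEADSET = 1u << 1, DEFAULT_DEV = 1u << 7 };
    enum Strategy { SONIFICATION, ACCESSIBILITY };

    DeviceMask deviceForStrategyInt(Strategy strategy, DeviceMask available) {
        DeviceMask device = 0;
        switch (strategy) {
        case SONIFICATION:
            device = available & SPEAKER;
            break;
        case ACCESSIBILITY:
            // narrow the candidate set, then reuse another strategy's logic;
            // the recursion only ever sees the filtered devices
            device = deviceForStrategyInt(SONIFICATION, available & ~HEADSET);
            break;
        }
        if (device == 0) {
            device = DEFAULT_DEV;  // single, centralized fallback
        }
        return device;
    }
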
@@ -663,6 +620,7 @@
         break;
 
     case AUDIO_SOURCE_VOICE_RECOGNITION:
+    case AUDIO_SOURCE_UNPROCESSED:
     case AUDIO_SOURCE_HOTWORD:
         if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
                 availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
@@ -702,6 +660,14 @@
         ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
         break;
     }
+    if (device == AUDIO_DEVICE_NONE) {
+        ALOGV("getDeviceForInputSource() no device found for source %d", inputSource);
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_STUB) {
+            device = AUDIO_DEVICE_IN_STUB;
+        }
+        ALOGE_IF(device == AUDIO_DEVICE_NONE,
+                 "getDeviceForInputSource() no default device defined");
+    }
     ALOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device);
     return device;
 }
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 56a4748..606ad28 100755
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -18,7 +18,6 @@
 
 
 #include "AudioPolicyManagerInterface.h"
-#include "Gains.h"
 #include <AudioGain.h>
 #include <policy.h>
 
@@ -93,19 +92,6 @@
         {
             return NO_ERROR;
         }
-        virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
-        {
-            return mPolicyEngine->initStreamVolume(stream, indexMin, indexMax);
-        }
-        virtual void initializeVolumeCurves(bool isSpeakerDrcEnabled)
-        {
-            return mPolicyEngine->initializeVolumeCurves(isSpeakerDrcEnabled);
-        }
-        virtual float volIndexToDb(Volume::device_category deviceCategory,
-                                     audio_stream_type_t stream,int indexInUi)
-        {
-            return mPolicyEngine->volIndexToDb(deviceCategory, stream, indexInUi);
-        }
     private:
         Engine *mPolicyEngine;
     } mManagerInterface;
@@ -139,13 +125,11 @@
     routing_strategy getStrategyForStream(audio_stream_type_t stream);
     routing_strategy getStrategyForUsage(audio_usage_t usage);
     audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
+    audio_devices_t getDeviceForStrategyInt(routing_strategy strategy,
+                                            DeviceVector availableOutputDevices,
+                                            DeviceVector availableInputDevices,
+                                            const SwAudioOutputCollection &outputs) const;
     audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
-
-    float volIndexToDb(Volume::device_category category,
-                         audio_stream_type_t stream, int indexInUi);
-    status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
-    void initializeVolumeCurves(bool isSpeakerDrcEnabled);
-
     audio_mode_t mPhoneState;  /**< current phone state. */
 
     /** current forced use configuration. */
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 5ff1c0b..00fd05a 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "APM::AudioPolicyManager"
+#define LOG_TAG "APM_AudioPolicyManager"
 //#define LOG_NDEBUG 0
 
 //#define VERY_VERBOSE_LOGGING
@@ -24,6 +24,8 @@
 #define ALOGVV(a...) do { } while(0)
 #endif
 
+#define AUDIO_POLICY_XML_CONFIG_FILE "/system/etc/audio_policy_configuration.xml"
+
 #include <inttypes.h>
 #include <math.h>
 
@@ -37,8 +39,12 @@
 #include <media/AudioPolicyHelper.h>
 #include <soundtrigger/SoundTrigger.h>
 #include "AudioPolicyManager.h"
-#include "audio_policy_conf.h"
+#ifndef USE_XML_AUDIO_POLICY_CONF
 #include <ConfigParsingUtils.h>
+#include <StreamDescriptor.h>
+#endif
+#include <Serializer.h>
+#include "TypeConverter.h"
 #include <policy.h>
 
 namespace android {
@@ -190,6 +196,10 @@
             }
         }
 
+        if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
+            cleanUpForDevice(devDesc);
+        }
+
         mpClientInterface->onAudioPortListUpdate();
         return NO_ERROR;
     }  // end if is output device
@@ -266,6 +276,10 @@
             updateCallRouting(newDevice);
         }
 
+        if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
+            cleanUpForDevice(devDesc);
+        }
+
         mpClientInterface->onAudioPortListUpdate();
         return NO_ERROR;
     } // end if is input device
@@ -277,7 +291,15 @@
 audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device,
                                                                       const char *device_address)
 {
-    sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(device, device_address, "");
+    sp<DeviceDescriptor> devDesc =
+            mHwModules.getDeviceDescriptor(device, device_address, "",
+                                           (strlen(device_address) != 0)/*matchAddress*/);
+
+    if (devDesc == 0) {
+        ALOGW("getDeviceConnectionState() undeclared device, type %08x, address: %s",
+              device, device_address);
+        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+    }
 
     DeviceVector *deviceVector;
 
@@ -289,15 +311,14 @@
         ALOGW("getDeviceConnectionState() invalid device type %08x", device);
         return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     }
-    return deviceVector->getDeviceConnectionState(devDesc);
+
+    return (deviceVector->getDevice(device, String8(device_address)) != 0) ?
+            AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
 }
 
 void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs)
 {
     bool createTxPatch = false;
-    struct audio_patch patch;
-    patch.num_sources = 1;
-    patch.num_sinks = 1;
     status_t status;
     audio_patch_handle_t afPatchHandle;
     DeviceVector deviceList;
@@ -330,8 +351,11 @@
                 == AUDIO_DEVICE_NONE) {
             createTxPatch = true;
         }
-    } else {
-        // create RX path audio patch
+    } else { // create RX path audio patch
+        struct audio_patch patch;
+
+        patch.num_sources = 1;
+        patch.num_sinks = 1;
         deviceList = mAvailableOutputDevices.getDevicesFromType(rxDevice);
         ALOG_ASSERT(!deviceList.isEmpty(),
                     "updateCallRouting() selected device not in output device list");
@@ -370,9 +394,9 @@
         }
         createTxPatch = true;
     }
-    if (createTxPatch) {
-
+    if (createTxPatch) { // create TX path audio patch
         struct audio_patch patch;
+
         patch.num_sources = 1;
         patch.num_sinks = 1;
         deviceList = mAvailableInputDevices.getDevicesFromType(txDevice);
@@ -410,7 +434,9 @@
         if (activeInput != 0) {
             sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
             if (activeDesc->getModuleHandle() == txSourceDeviceDesc->getModuleHandle()) {
-                audio_session_t activeSession = activeDesc->mSessions.itemAt(0);
+                //FIXME: consider all active sessions
+                AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
+                audio_session_t activeSession = activeSessions.keyAt(0);
                 stopInput(activeInput, activeSession);
                 releaseInput(activeInput, activeSession);
             }
@@ -443,10 +469,7 @@
     // pertaining to sonification strategy see handleIncallSonification()
     if (isStateInCall(oldState)) {
         ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-            if (stream == AUDIO_STREAM_PATCH) {
-                continue;
-            }
+        for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
             handleIncallSonification((audio_stream_type_t)stream, false, true);
         }
 
@@ -522,10 +545,7 @@
     // pertaining to sonification strategy see handleIncallSonification()
     if (isStateInCall(state)) {
         ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-            if (stream == AUDIO_STREAM_PATCH) {
-                continue;
-            }
+        for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
             handleIncallSonification((audio_stream_type_t)stream, true, true);
         }
 
@@ -563,6 +583,7 @@
     checkA2dpSuspend();
     checkOutputForAllStrategies();
     updateDevicesAndOutputs();
+
     if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
         audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
         updateCallRouting(newDevice);
@@ -583,7 +604,7 @@
         sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
         audio_devices_t newDevice = getNewInputDevice(activeInput);
         // Force new input selection if the new device can not be reached via current input
-        if (activeDesc->mProfile->mSupportedDevices.types() & (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
+        if (activeDesc->mProfile->getSupportedDevices().types() & (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
             setInputDevice(activeInput, newDevice);
         } else {
             closeInput(activeInput);
@@ -628,15 +649,15 @@
                 continue;
             }
             // reject profiles not corresponding to a device currently available
-            if ((mAvailableOutputDevices.types() & curProfile->mSupportedDevices.types()) == 0) {
+            if ((mAvailableOutputDevices.types() & curProfile->getSupportedDevicesType()) == 0) {
                 continue;
             }
             // if several profiles are compatible, give priority to one with offload capability
-            if (profile != 0 && ((curProfile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
+            if (profile != 0 && ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
                 continue;
             }
             profile = curProfile;
-            if ((profile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+            if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
                 break;
             }
         }
@@ -690,9 +711,9 @@
         stream_type_to_audio_attributes(*stream, &attributes);
     }
     sp<SwAudioOutputDescriptor> desc;
-    if (mPolicyMixes.getOutputForAttr(attributes, desc) == NO_ERROR) {
+    if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
         ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
-        if (!audio_is_linear_pcm(format)) {
+        if (!audio_has_proportional_frames(format)) {
             return BAD_VALUE;
         }
         *stream = streamTypefromAttributesInt(&attributes);
@@ -827,20 +848,20 @@
     // skip direct output selection if the request can obviously be attached to a mixed output
     // and not explicitly requested
     if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
-            audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE &&
+            audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX &&
             audio_channel_count_from_out_mask(channelMask) <= 2) {
         goto non_direct_output;
     }
 
-    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
-    // creating an offloaded track and tearing it down immediately after start when audioflinger
-    // detects there is an active non offloadable effect.
+    // Do not allow offloading if one non offloadable effect is enabled or MasterMono is enabled.
+    // This prevents creating an offloaded track and tearing it down immediately after start
+    // when audioflinger detects there is an active non offloadable effect.
     // FIXME: We should check the audio session here but we do not have it in this context.
     // This may prevent offloading in rare situations where effects are left active by apps
     // in the background.
 
     if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
-            !mEffects.isNonOffloadableEffectEnabled()) {
+            !(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
         profile = getProfileForDirectOutput(device,
                                            samplingRate,
                                            format,
@@ -857,7 +878,7 @@
                 outputDesc = desc;
                 // reuse direct output if currently open and configured with same parameters
                 if ((samplingRate == outputDesc->mSamplingRate) &&
-                        (format == outputDesc->mFormat) &&
+                        audio_formats_match(format, outputDesc->mFormat) &&
                         (channelMask == outputDesc->mChannelMask)) {
                     outputDesc->mDirectOpenCount++;
                     ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i));
@@ -873,7 +894,7 @@
         // if the selected profile is offloaded and no offload info was specified,
         // create a default one
         audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
-        if ((profile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
+        if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
             flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
             defaultOffloadInfo.sample_rate = samplingRate;
             defaultOffloadInfo.channel_mask = channelMask;
@@ -908,7 +929,7 @@
         // only accept an output with the requested parameters
         if (status != NO_ERROR ||
             (samplingRate != 0 && samplingRate != config.sample_rate) ||
-            (format != AUDIO_FORMAT_DEFAULT && format != config.format) ||
+            (format != AUDIO_FORMAT_DEFAULT && !audio_formats_match(format, config.format)) ||
             (channelMask != 0 && channelMask != config.channel_mask)) {
             ALOGV("getOutput() failed opening direct output: output %d samplingRate %d %d,"
                     "format %d %d, channelMask %04x %04x", output, samplingRate,
@@ -918,7 +939,7 @@
                 mpClientInterface->closeOutput(output);
             }
             // fall back to mixer output if possible when the direct output could not be open
-            if (audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE) {
+            if (audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX) {
                 goto non_direct_output;
             }
             return AUDIO_IO_HANDLE_NONE;
@@ -943,6 +964,13 @@
     }
 
 non_direct_output:
+
+    // A request for HW A/V sync cannot fallback to a mixed output because time
+    // stamps are embedded in audio data
+    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
+        return AUDIO_IO_HANDLE_NONE;
+    }
+
     // ignoring channel mask due to downmix capability in mixer
 
     // open a non direct output
@@ -973,8 +1001,9 @@
     // devices (the list was previously build by getOutputsForDevice()).
     // The priority is as follows:
     // 1: the output with the highest number of requested policy flags
-    // 2: the primary output
-    // 3: the first output in the list
+    // 2: the output with the bit depth the closest to the requested one
+    // 3: the primary output
+    // 4: the first output in the list
 
     if (outputs.size() == 0) {
         return 0;
@@ -984,8 +1013,11 @@
     }
 
     int maxCommonFlags = 0;
-    audio_io_handle_t outputFlags = 0;
-    audio_io_handle_t outputPrimary = 0;
+    audio_io_handle_t outputForFlags = 0;
+    audio_io_handle_t outputForPrimary = 0;
+    audio_io_handle_t outputForFormat = 0;
+    audio_format_t bestFormat = AUDIO_FORMAT_INVALID;
+    audio_format_t bestFormatForFlags = AUDIO_FORMAT_INVALID;
 
     for (size_t i = 0; i < outputs.size(); i++) {
         sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
@@ -993,31 +1025,48 @@
             // if a valid format is specified, skip output if not compatible
             if (format != AUDIO_FORMAT_INVALID) {
                 if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
-                    if (format != outputDesc->mFormat) {
+                    if (!audio_formats_match(format, outputDesc->mFormat)) {
                         continue;
                     }
                 } else if (!audio_is_linear_pcm(format)) {
                     continue;
                 }
+                if (AudioPort::isBetterFormatMatch(
+                        outputDesc->mFormat, bestFormat, format)) {
+                    outputForFormat = outputs[i];
+                    bestFormat = outputDesc->mFormat;
+                }
             }
 
-            int commonFlags = popcount(outputDesc->mProfile->mFlags & flags);
-            if (commonFlags > maxCommonFlags) {
-                outputFlags = outputs[i];
-                maxCommonFlags = commonFlags;
+            int commonFlags = popcount(outputDesc->mProfile->getFlags() & flags);
+            if (commonFlags >= maxCommonFlags) {
+                if (commonFlags == maxCommonFlags) {
+                    if (AudioPort::isBetterFormatMatch(
+                            outputDesc->mFormat, bestFormatForFlags, format)) {
+                        outputForFlags = outputs[i];
+                        bestFormatForFlags = outputDesc->mFormat;
+                    }
+                } else {
+                    outputForFlags = outputs[i];
+                    maxCommonFlags = commonFlags;
+                    bestFormatForFlags = outputDesc->mFormat;
+                }
                 ALOGV("selectOutput() commonFlags for output %d, %04x", outputs[i], commonFlags);
             }
-            if (outputDesc->mProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
-                outputPrimary = outputs[i];
+            if (outputDesc->mProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
+                outputForPrimary = outputs[i];
             }
         }
     }
 
-    if (outputFlags != 0) {
-        return outputFlags;
+    if (outputForFlags != 0) {
+        return outputForFlags;
     }
-    if (outputPrimary != 0) {
-        return outputPrimary;
+    if (outputForFormat != 0) {
+        return outputForFormat;
+    }
+    if (outputForPrimary != 0) {
+        return outputForPrimary;
     }
 
     return outputs[0];
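
selectOutput() now tracks a third candidate and breaks flag ties by format proximity, giving the four-level order listed in the comment at the top of the hunk. A condensed sketch of that ordering; the scoring fields stand in for popcount() over shared flags and AudioPort::isBetterFormatMatch():

    #include <vector>

    struct Candidate {
        int handle;
        int commonFlags;   // stand-in for popcount(profileFlags & flags)
        bool betterFormat; // stand-in for isBetterFormatMatch() vs. best so far
        bool isPrimary;
    };

    int selectOutputSketch(const std::vector<Candidate> &outputs) {
        if (outputs.empty()) return 0;
        if (outputs.size() == 1) return outputs[0].handle;

        int forFlags = 0, forFormat = 0, forPrimary = 0, maxFlags = 0;
        for (const Candidate &c : outputs) {
            if (c.betterFormat) forFormat = c.handle;
            if (c.commonFlags > maxFlags ||
                    (c.commonFlags == maxFlags && c.betterFormat)) {
                forFlags = c.handle;  // most shared flags, format as tie-break
                maxFlags = c.commonFlags;
            }
            if (c.isPrimary) forPrimary = c.handle;
        }
        if (forFlags != 0) return forFlags;     // 1: requested policy flags
        if (forFormat != 0) return forFormat;   // 2: closest bit depth
        if (forPrimary != 0) return forPrimary; // 3: primary output
        return outputs[0].handle;               // 4: first in the list
    }
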
@@ -1041,8 +1090,16 @@
     mOutputRoutes.incRouteActivity(session);
 
     audio_devices_t newDevice;
+    AudioMix *policyMix = NULL;
+    const char *address = NULL;
     if (outputDesc->mPolicyMix != NULL) {
-        newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+        policyMix = outputDesc->mPolicyMix;
+        address = policyMix->mDeviceAddress.string();
+        if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+            newDevice = policyMix->mDeviceType;
+        } else {
+            newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+        }
     } else if (mOutputRoutes.hasRouteChanged(session)) {
         newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
         checkStrategyRoute(getStrategy(stream), output);
@@ -1052,7 +1109,7 @@
 
     uint32_t delayMs = 0;
 
-    status_t status = startSource(outputDesc, stream, newDevice, &delayMs);
+    status_t status = startSource(outputDesc, stream, newDevice, address, &delayMs);
 
     if (status != NO_ERROR) {
         mOutputRoutes.decRouteActivity(session);
@@ -1060,11 +1117,11 @@
     }
     // Automatically enable the remote submix input when output is started on a re routing mix
     // of type MIX_TYPE_RECORDERS
-    if (audio_is_remote_submix_device(newDevice) && outputDesc->mPolicyMix != NULL &&
-            outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
+    if (audio_is_remote_submix_device(newDevice) && policyMix != NULL &&
+            policyMix->mMixType == MIX_TYPE_RECORDERS) {
             setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                    outputDesc->mPolicyMix->mRegistrationId,
+                    address,
                     "remote-submix");
     }
 
@@ -1078,6 +1135,7 @@
 status_t AudioPolicyManager::startSource(sp<AudioOutputDescriptor> outputDesc,
                                              audio_stream_type_t stream,
                                              audio_devices_t device,
+                                             const char *address,
                                              uint32_t *delayMs)
 {
     // cannot start playback of STREAM_TTS if any other output is being used
@@ -1134,7 +1192,7 @@
                 }
             }
         }
-        uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force);
+        uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force, 0, NULL, address);
 
         // handle special case for sonification while in call
         if (isInCall()) {
@@ -1143,7 +1201,7 @@
 
         // apply volume rules for current stream and device if necessary
         checkAndSetVolume(stream,
-                          mStreams.valueFor(stream).getVolumeIndex(device),
+                          mVolumeCurves->getVolumeIndex(stream, device),
                           outputDesc,
                           device);
 
@@ -1181,7 +1239,7 @@
                 outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
             setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                     AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                    outputDesc->mPolicyMix->mRegistrationId,
+                    outputDesc->mPolicyMix->mDeviceAddress,
                     "remote-submix");
         }
     }
@@ -1321,7 +1379,6 @@
     audio_devices_t device;
     // handle legacy remote submix case where the address was not always specified
     String8 address = String8("");
-    bool isSoundTrigger = false;
     audio_source_t inputSource = attr->source;
     audio_source_t halInputSource;
     AudioMix *policyMix = NULL;
@@ -1357,7 +1414,7 @@
             return BAD_VALUE;
         }
         if (policyMix != NULL) {
-            address = policyMix->mRegistrationId;
+            address = policyMix->mDeviceAddress;
             if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
                 // there is an external policy, but this input is attached to a mix of recorders,
                 // meaning it receives audio injected into the framework, so the recorder doesn't
@@ -1376,37 +1433,52 @@
         } else {
             *inputType = API_INPUT_LEGACY;
         }
-        // adapt channel selection to input source
-        switch (inputSource) {
-        case AUDIO_SOURCE_VOICE_UPLINK:
-            channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK;
-            break;
-        case AUDIO_SOURCE_VOICE_DOWNLINK:
-            channelMask = AUDIO_CHANNEL_IN_VOICE_DNLINK;
-            break;
-        case AUDIO_SOURCE_VOICE_CALL:
-            channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK;
-            break;
-        default:
-            break;
-        }
-        if (inputSource == AUDIO_SOURCE_HOTWORD) {
-            ssize_t index = mSoundTriggerSessions.indexOfKey(session);
-            if (index >= 0) {
-                *input = mSoundTriggerSessions.valueFor(session);
-                isSoundTrigger = true;
-                flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD);
-                ALOGV("SoundTrigger capture on session %d input %d", session, *input);
-            } else {
-                halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
-            }
+
+    }
+
+    *input = getInputForDevice(device, address, session, uid, inputSource,
+                               samplingRate, format, channelMask, flags,
+                               policyMix);
+    if (*input == AUDIO_IO_HANDLE_NONE) {
+        mInputRoutes.removeRoute(session);
+        return INVALID_OPERATION;
+    }
+    ALOGV("getInputForAttr() returns input type = %d", *inputType);
+    return NO_ERROR;
+}
+
+
+audio_io_handle_t AudioPolicyManager::getInputForDevice(audio_devices_t device,
+                                                        String8 address,
+                                                        audio_session_t session,
+                                                        uid_t uid,
+                                                        audio_source_t inputSource,
+                                                        uint32_t samplingRate,
+                                                        audio_format_t format,
+                                                        audio_channel_mask_t channelMask,
+                                                        audio_input_flags_t flags,
+                                                        AudioMix *policyMix)
+{
+    audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+    audio_source_t halInputSource = inputSource;
+    bool isSoundTrigger = false;
+
+    if (inputSource == AUDIO_SOURCE_HOTWORD) {
+        ssize_t index = mSoundTriggerSessions.indexOfKey(session);
+        if (index >= 0) {
+            input = mSoundTriggerSessions.valueFor(session);
+            isSoundTrigger = true;
+            flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD);
+            ALOGV("SoundTrigger capture on session %d input %d", session, input);
+        } else {
+            halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
         }
     }
 
     // find a compatible input profile (not necessarily identical in parameters)
     sp<IOProfile> profile;
     // samplingRate and flags may be updated by getInputProfile
-    uint32_t profileSamplingRate = samplingRate;
+    uint32_t profileSamplingRate = (samplingRate == 0) ? SAMPLE_RATE_HZ_DEFAULT : samplingRate;
     audio_format_t profileFormat = format;
     audio_channel_mask_t profileChannelMask = channelMask;
     audio_input_flags_t profileFlags = flags;
@@ -1419,25 +1491,67 @@
         } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
             profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
         } else { // fail
-            ALOGW("getInputForAttr() could not find profile for device 0x%X, samplingRate %u,"
-                    "format %#x, channelMask 0x%X, flags %#x",
+            ALOGW("getInputForDevice() could not find profile for device 0x%X,"
+                  "samplingRate %u, format %#x, channelMask 0x%X, flags %#x",
                     device, samplingRate, format, channelMask, flags);
-            return BAD_VALUE;
+            return input;
         }
     }
+    // Pick input sampling rate if not specified by client
+    if (samplingRate == 0) {
+        samplingRate = profileSamplingRate;
+    }
 
     if (profile->getModuleHandle() == 0) {
         ALOGE("getInputForAttr(): HW module %s not opened", profile->getModuleName());
-        return NO_INIT;
+        return input;
     }
 
+    sp<AudioSession> audioSession = new AudioSession(session,
+                                                     inputSource,
+                                                     format,
+                                                     samplingRate,
+                                                     channelMask,
+                                                     flags,
+                                                     uid,
+                                                     isSoundTrigger,
+                                                     policyMix,
+                                                     mpClientInterface);
+
+// TODO enable input reuse
+#if 0
+    // reuse an open input if possible
+    for (size_t i = 0; i < mInputs.size(); i++) {
+        sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
+        // reuse input if it shares the same profile and same sound trigger attribute
+        if (profile == desc->mProfile &&
+            isSoundTrigger == desc->isSoundTrigger()) {
+
+            sp<AudioSession> as = desc->getAudioSession(session);
+            if (as != 0) {
+                // do not allow unmatching properties on same session
+                if (as->matches(audioSession)) {
+                    as->changeOpenCount(1);
+                } else {
+                    ALOGW("getInputForDevice() record with different attributes"
+                          " exists for session %d", session);
+                    return input;
+                }
+            } else {
+                desc->addAudioSession(session, audioSession);
+            }
+            ALOGV("getInputForDevice() reusing input %d", mInputs.keyAt(i));
+            return mInputs.keyAt(i);
+        }
+    }
+#endif
+
     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
     config.sample_rate = profileSamplingRate;
     config.channel_mask = profileChannelMask;
     config.format = profileFormat;
 
     status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
-                                                   input,
+                                                   &input,
                                                    &config,
                                                    &device,
                                                    address,
@@ -1445,37 +1559,31 @@
                                                    profileFlags);
 
     // only accept input with the exact requested set of parameters
-    if (status != NO_ERROR || *input == AUDIO_IO_HANDLE_NONE ||
+    if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
         (profileSamplingRate != config.sample_rate) ||
-        (profileFormat != config.format) ||
+        !audio_formats_match(profileFormat, config.format) ||
         (profileChannelMask != config.channel_mask)) {
-        ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d,"
-                " channelMask %x",
+        ALOGW("getInputForAttr() failed opening input: samplingRate %d"
+              ", format %d, channelMask %x",
                 samplingRate, format, channelMask);
-        if (*input != AUDIO_IO_HANDLE_NONE) {
-            mpClientInterface->closeInput(*input);
+        if (input != AUDIO_IO_HANDLE_NONE) {
+            mpClientInterface->closeInput(input);
         }
-        return BAD_VALUE;
+        return AUDIO_IO_HANDLE_NONE;
     }
 
     sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
-    inputDesc->mInputSource = inputSource;
-    inputDesc->mRefCount = 0;
-    inputDesc->mOpenRefCount = 1;
     inputDesc->mSamplingRate = profileSamplingRate;
     inputDesc->mFormat = profileFormat;
     inputDesc->mChannelMask = profileChannelMask;
     inputDesc->mDevice = device;
-    inputDesc->mSessions.add(session);
-    inputDesc->mIsSoundTrigger = isSoundTrigger;
     inputDesc->mPolicyMix = policyMix;
+    inputDesc->addAudioSession(session, audioSession);
 
-    ALOGV("getInputForAttr() returns input type = %d", *inputType);
-
-    addInput(*input, inputDesc);
+    addInput(input, inputDesc);
     mpClientInterface->onAudioPortListUpdate();
 
-    return NO_ERROR;
+    return input;
 }
 
 status_t AudioPolicyManager::startInput(audio_io_handle_t input,
@@ -1489,8 +1597,8 @@
     }
     sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
 
-    index = inputDesc->mSessions.indexOf(session);
-    if (index < 0) {
+    sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
+    if (audioSession == 0) {
         ALOGW("startInput() unknown session %d on input %d", session, input);
         return BAD_VALUE;
     }
@@ -1505,11 +1613,14 @@
             // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
             // otherwise the active input continues and the new input cannot be started.
             sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
-            if ((activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) &&
+            if ((activeDesc->inputSource() == AUDIO_SOURCE_HOTWORD) &&
                     !activeDesc->hasPreemptedSession(session)) {
                 ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
-                audio_session_t activeSession = activeDesc->mSessions.itemAt(0);
-                SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
+                //FIXME: consider all active sessions
+                AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
+                audio_session_t activeSession = activeSessions.keyAt(0);
+                SortedVector<audio_session_t> sessions =
+                        activeDesc->getPreemptedSessions();
                 sessions.add(activeSession);
                 inputDesc->setPreemptedSessions(sessions);
                 stopInput(activeInput, activeSession);
@@ -1533,11 +1644,11 @@
     // Routing?
     mInputRoutes.incRouteActivity(session);
 
-    if (inputDesc->mRefCount == 0 || mInputRoutes.hasRouteChanged(session)) {
+    if (!inputDesc->isActive() || mInputRoutes.hasRouteChanged(session)) {
         // if input maps to a dynamic policy with an activity listener, notify of state change
         if ((inputDesc->mPolicyMix != NULL)
                 && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
-            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mRegistrationId,
+            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
                     MIX_STATE_MIXING);
         }
 
@@ -1554,7 +1665,7 @@
             if (inputDesc->mPolicyMix == NULL) {
                 address = String8("0");
             } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
-                address = inputDesc->mPolicyMix->mRegistrationId;
+                address = inputDesc->mPolicyMix->mDeviceAddress;
             }
             if (address != "") {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
@@ -1564,9 +1675,9 @@
         }
     }
 
-    ALOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource);
+    ALOGV("AudioPolicyManager::startInput() input source = %d", audioSession->inputSource());
 
-    inputDesc->mRefCount++;
+    audioSession->changeActiveCount(1);
     return NO_ERROR;
 }
 
@@ -1581,27 +1692,27 @@
     }
     sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
 
-    index = inputDesc->mSessions.indexOf(session);
-    if (index < 0) {
+    sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
+    if (audioSession == 0) {
         ALOGW("stopInput() unknown session %d on input %d", session, input);
         return BAD_VALUE;
     }
 
-    if (inputDesc->mRefCount == 0) {
+    if (audioSession->activeCount() == 0) {
         ALOGW("stopInput() input %d already stopped", input);
         return INVALID_OPERATION;
     }
 
-    inputDesc->mRefCount--;
+    audioSession->changeActiveCount(-1);
 
     // Routing?
     mInputRoutes.decRouteActivity(session);
 
-    if (inputDesc->mRefCount == 0) {
+    if (!inputDesc->isActive()) {
         // if input maps to a dynamic policy with an activity listener, notify of state change
         if ((inputDesc->mPolicyMix != NULL)
                 && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
-            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mRegistrationId,
+            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
                     MIX_STATE_IDLE);
         }
 
@@ -1612,7 +1723,7 @@
             if (inputDesc->mPolicyMix == NULL) {
                 address = String8("0");
             } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
-                address = inputDesc->mPolicyMix->mRegistrationId;
+                address = inputDesc->mPolicyMix->mDeviceAddress;
             }
             if (address != "") {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
@@ -1634,6 +1745,7 @@
 void AudioPolicyManager::releaseInput(audio_io_handle_t input,
                                       audio_session_t session)
 {
+
     ALOGV("releaseInput() %d", input);
     ssize_t index = mInputs.indexOfKey(input);
     if (index < 0) {
@@ -1647,18 +1759,23 @@
     sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
     ALOG_ASSERT(inputDesc != 0);
 
-    index = inputDesc->mSessions.indexOf(session);
-    if (index < 0) {
+    sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
+    if (audioSession == 0) {
         ALOGW("releaseInput() unknown session %d on input %d", session, input);
         return;
     }
-    inputDesc->mSessions.remove(session);
-    if (inputDesc->mOpenRefCount == 0) {
-        ALOGW("releaseInput() invalid open ref count %d", inputDesc->mOpenRefCount);
+
+    if (audioSession->openCount() == 0) {
+        ALOGW("releaseInput() invalid open count %d on session %d",
+              audioSession->openCount(), session);
         return;
     }
-    inputDesc->mOpenRefCount--;
-    if (inputDesc->mOpenRefCount > 0) {
+
+    if (audioSession->changeOpenCount(-1) == 0) {
+        inputDesc->removeAudioSession(session);
+    }
+
+    if (inputDesc->getOpenRefCount() > 0) {
         ALOGV("releaseInput() exit > 0");
         return;
     }
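
With mRefCount and mOpenRefCount gone from the input descriptor, get/release and start/stop are now counted per session, and the descriptor aggregates across its sessions (isActive(), getOpenRefCount()). A toy version of that bookkeeping; the types are hypothetical, not the AOSP AudioSession class:

    #include <map>

    // One counter pair per session: 'open' tracks get/release pairs,
    // 'active' tracks start/stop pairs.
    struct SessionCounts {
        int open = 1;   // a session is created open once
        int active = 0;
    };

    class InputSessions {
    public:
        void add(int session) { mSessions[session] = SessionCounts(); }
        void changeActiveCount(int session, int delta) {
            mSessions[session].active += delta;
        }
        // returns the new open count; the caller removes the session at zero
        int changeOpenCount(int session, int delta) {
            return mSessions[session].open += delta;
        }
        bool isActive() const {
            for (const auto &s : mSessions) {
                if (s.second.active > 0) return true;
            }
            return false;
        }
        int openRefCount() const {
            int count = 0;
            for (const auto &s : mSessions) count += s.second.open;
            return count;  // input can be closed when this reaches zero
        }
    private:
        std::map<int, SessionCounts> mSessions;
    };
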
@@ -1673,7 +1790,7 @@
 
     for(size_t input_index = 0; input_index < mInputs.size(); input_index++) {
         sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
-        ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+        ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
         if (patch_index >= 0) {
             sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
             status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
@@ -1683,6 +1800,7 @@
         mpClientInterface->closeInput(mInputs.keyAt(input_index));
     }
     mInputs.clear();
+    SoundTrigger::setCaptureState(false);
     nextAudioPortGeneration();
 
     if (patchRemoved) {
@@ -1695,10 +1813,14 @@
                                             int indexMax)
 {
     ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
-    mEngine->initStreamVolume(stream, indexMin, indexMax);
-    //FIXME: AUDIO_STREAM_ACCESSIBILITY volume follows AUDIO_STREAM_MUSIC for now
-    if (stream == AUDIO_STREAM_MUSIC) {
-        mEngine->initStreamVolume(AUDIO_STREAM_ACCESSIBILITY, indexMin, indexMax);
+    mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
+
+    // initialize other private stream volumes which follow this one
+    for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+        if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+            continue;
+        }
+        mVolumeCurves->initStreamVolume((audio_stream_type_t)curStream, indexMin, indexMax);
     }
 }
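
initStreamVolume() above, and setStreamVolumeIndex() below, fan the operation out to every stream that shares volume control with the requested one, replacing the old ACCESSIBILITY-follows-MUSIC special case. The common loop shape, with a stand-in for streamsMatchForvolume():

    #include <functional>

    const int STREAM_FOR_POLICY_CNT = 12;  // assumed stream count

    // Stand-in predicate: the real grouping comes from streamsMatchForvolume()
    bool matchForVolume(int a, int b) { return a == b; }

    void forAllMatchingStreams(int stream, const std::function<void(int)> &op) {
        for (int cur = 0; cur < STREAM_FOR_POLICY_CNT; cur++) {
            if (matchForVolume(stream, cur)) {
                op(cur);  // e.g. init the volume range, or apply a new index
            }
        }
    }
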
 
@@ -1707,8 +1829,8 @@
                                                   audio_devices_t device)
 {
 
-    if ((index < mStreams.valueFor(stream).getVolumeIndexMin()) ||
-            (index > mStreams.valueFor(stream).getVolumeIndexMax())) {
+    if ((index < mVolumeCurves->getVolumeIndexMin(stream)) ||
+            (index > mVolumeCurves->getVolumeIndexMax(stream))) {
         return BAD_VALUE;
     }
     if (!audio_is_output_device(device)) {
@@ -1716,48 +1838,59 @@
     }
 
     // Force max volume if stream cannot be muted
-    if (!mStreams.canBeMuted(stream)) index = mStreams.valueFor(stream).getVolumeIndexMax();
+    if (!mVolumeCurves->canBeMuted(stream)) index = mVolumeCurves->getVolumeIndexMax(stream);
 
-    ALOGV("setStreamVolumeIndex() stream %d, device %04x, index %d",
+    ALOGV("setStreamVolumeIndex() stream %d, device %08x, index %d",
           stream, device, index);
 
-    // if device is AUDIO_DEVICE_OUT_DEFAULT set default value and
-    // clear all device specific values
-    if (device == AUDIO_DEVICE_OUT_DEFAULT) {
-        mStreams.clearCurrentVolumeIndex(stream);
+    // update other private stream volumes which follow this one
+    for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+        if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+            continue;
+        }
+        mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
     }
-    mStreams.addCurrentVolumeIndex(stream, device, index);
 
-    // update volume on all outputs whose current device is also selected by the same
-    // strategy as the device specified by the caller
-    audio_devices_t strategyDevice = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
-
-
-    //FIXME: AUDIO_STREAM_ACCESSIBILITY volume follows AUDIO_STREAM_MUSIC for now
-    audio_devices_t accessibilityDevice = AUDIO_DEVICE_NONE;
-    if (stream == AUDIO_STREAM_MUSIC) {
-        mStreams.addCurrentVolumeIndex(AUDIO_STREAM_ACCESSIBILITY, device, index);
-        accessibilityDevice = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, true /*fromCache*/);
-    }
-    if ((device != AUDIO_DEVICE_OUT_DEFAULT) &&
-            (device & (strategyDevice | accessibilityDevice)) == 0) {
-        return NO_ERROR;
-    }
+    // update volume on all outputs and streams matching the following:
+    // - The requested stream (or a stream matching for volume control) is active on the output
+    // - The device (or devices) selected by the strategy corresponding to this stream includes
+    //   the requested device
+    // - For a non-default requested device, the currently selected device on the output is either
+    //   the requested device or one of the devices selected by the strategy
+    // - For the default requested device (AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME), apply volume only
+    //   if no specific device volume value exists for the currently selected device.
     status_t status = NO_ERROR;
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
         audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
-        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & strategyDevice) != 0)) {
-            status_t volStatus = checkAndSetVolume(stream, index, desc, curDevice);
-            if (volStatus != NO_ERROR) {
-                status = volStatus;
+        for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+            if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+                continue;
             }
-        }
-        if ((accessibilityDevice != AUDIO_DEVICE_NONE) &&
-                ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & accessibilityDevice) != 0)))
-        {
-            status_t volStatus = checkAndSetVolume(AUDIO_STREAM_ACCESSIBILITY,
-                                                   index, desc, curDevice);
+            if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
+                    (isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
+                continue;
+            }
+            routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
+            audio_devices_t curStreamDevice = getDeviceForStrategy(curStrategy, true /*fromCache*/);
+            if ((curStreamDevice & device) == 0) {
+                continue;
+            }
+            bool applyDefault = false;
+            if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
+                curStreamDevice |= device;
+            } else if (!mVolumeCurves->hasVolumeIndexForDevice(
+                    stream, Volume::getDeviceForVolume(curStreamDevice))) {
+                applyDefault = true;
+            }
+
+            if (applyDefault || ((curDevice & curStreamDevice) != 0)) {
+                status_t volStatus =
+                        checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice);
+                if (volStatus != NO_ERROR) {
+                    status = volStatus;
+                }
+            }
         }
     }
     return status;
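
Condensing the loop body above into a single predicate makes the four commented conditions easier to follow. This is a faithful reshaping of the logic as an illustrative helper, not an AOSP function; hasSpecificIndex stands in for the hasVolumeIndexForDevice() lookup:

    #include <cstdint>

    bool shouldApplyVolume(bool streamActiveOnOutput,
                           uint32_t curDevice,        // device used by this output
                           uint32_t curStreamDevice,  // device(s) chosen by the strategy
                           uint32_t requestedDevice,
                           uint32_t defaultForVolume, // AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
                           bool hasSpecificIndex) {
        if (!streamActiveOnOutput) {
            return false;  // stream (or a matching stream) not active here
        }
        if ((curStreamDevice & requestedDevice) == 0) {
            return false;  // strategy never selects the requested device
        }
        bool applyDefault = false;
        if (requestedDevice != defaultForVolume) {
            curStreamDevice |= requestedDevice;
        } else if (!hasSpecificIndex) {
            applyDefault = true;  // no per-device value overrides the default
        }
        return applyDefault || ((curDevice & curStreamDevice) != 0);
    }
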
@@ -1773,14 +1906,14 @@
     if (!audio_is_output_device(device)) {
         return BAD_VALUE;
     }
-    // if device is AUDIO_DEVICE_OUT_DEFAULT, return volume for device corresponding to
+    // if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device corresponding to
     // the strategy the stream belongs to.
-    if (device == AUDIO_DEVICE_OUT_DEFAULT) {
+    if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
         device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
     }
     device = Volume::getDeviceForVolume(device);
 
-    *index =  mStreams.valueFor(stream).getVolumeIndex(device);
+    *index =  mVolumeCurves->getVolumeIndex(stream, device);
     ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
     return NO_ERROR;
 }
@@ -1860,7 +1993,14 @@
 
 bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
-    return mOutputs.isStreamActive(stream, inPastMs);
+    bool active = false;
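+    // streams that match for volume (e.g. ACCESSIBILITY and MUSIC) are treated as a
+    // single stream for activity detection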
+    for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT && !active; curStream++) {
+        if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+            continue;
+        }
+        active = mOutputs.isStreamActive((audio_stream_type_t)curStream, inPastMs);
+    }
+    return active;
 }
 
 bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
@@ -1872,23 +2012,9 @@
 {
     for (size_t i = 0; i < mInputs.size(); i++) {
         const sp<AudioInputDescriptor>  inputDescriptor = mInputs.valueAt(i);
-        if (inputDescriptor->mRefCount == 0) {
-            continue;
-        }
-        if (inputDescriptor->mInputSource == (int)source) {
+        if (inputDescriptor->isSourceActive(source)) {
             return true;
         }
-        // AUDIO_SOURCE_HOTWORD is equivalent to AUDIO_SOURCE_VOICE_RECOGNITION only if it
-        // corresponds to an active capture triggered by a hardware hotword recognition
-        if ((source == AUDIO_SOURCE_VOICE_RECOGNITION) &&
-                 (inputDescriptor->mInputSource == AUDIO_SOURCE_HOTWORD)) {
-            // FIXME: we should not assume that the first session is the active one and keep
-            // activity count per session. Same in startInput().
-            ssize_t index = mSoundTriggerSessions.indexOfKey(inputDescriptor->mSessions.itemAt(0));
-            if (index >= 0) {
-                return true;
-            }
-        }
     }
     return false;
 }
@@ -1918,94 +2044,160 @@
 
 status_t AudioPolicyManager::registerPolicyMixes(Vector<AudioMix> mixes)
 {
-    sp<HwModule> module;
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[i]->mName) == 0 &&
-                mHwModules[i]->mHandle != 0) {
-            module = mHwModules[i];
+    ALOGV("registerPolicyMixes() %zu mix(es)", mixes.size());
+    status_t res = NO_ERROR;
+
+    sp<HwModule> rSubmixModule;
+    // examine each mix's route type
+    for (size_t i = 0; i < mixes.size(); i++) {
+        // we only support MIX_ROUTE_FLAG_LOOP_BACK or MIX_ROUTE_FLAG_RENDER, not the combination
+        if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_ALL) == MIX_ROUTE_FLAG_ALL) {
+            res = INVALID_OPERATION;
             break;
         }
-    }
+        if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
+            // Loop back through "remote submix"
+            if (rSubmixModule == 0) {
+                for (size_t j = 0; j < mHwModules.size(); j++) {
+                    if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
+                            && mHwModules[j]->mHandle != 0) {
+                        rSubmixModule = mHwModules[j];
+                        break;
+                    }
+                }
+            }
 
-    if (module == 0) {
-        return INVALID_OPERATION;
-    }
+            ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK", i, mixes.size());
 
-    ALOGV("registerPolicyMixes() num mixes %d", mixes.size());
+            if (rSubmixModule == 0) {
+                ALOGE(" Unable to find audio module for submix, aborting mix %zu registration", i);
+                res = INVALID_OPERATION;
+                break;
+            }
 
-    for (size_t i = 0; i < mixes.size(); i++) {
-        String8 address = mixes[i].mRegistrationId;
+            String8 address = mixes[i].mDeviceAddress;
 
-        if (mPolicyMixes.registerMix(address, mixes[i]) != NO_ERROR) {
-            continue;
-        }
-        audio_config_t outputConfig = mixes[i].mFormat;
-        audio_config_t inputConfig = mixes[i].mFormat;
-        // NOTE: audio flinger mixer does not support mono output: configure remote submix HAL in
-        // stereo and let audio flinger do the channel conversion if needed.
-        outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
-        inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
-        module->addOutputProfile(address, &outputConfig,
-                                 AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
-        module->addInputProfile(address, &inputConfig,
-                                 AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
+            if (mPolicyMixes.registerMix(address, mixes[i], 0 /*output desc*/) != NO_ERROR) {
+                ALOGE(" Error registering mix %zu for address %s", i, address.string());
+                res = INVALID_OPERATION;
+                break;
+            }
+            audio_config_t outputConfig = mixes[i].mFormat;
+            audio_config_t inputConfig = mixes[i].mFormat;
+            // NOTE: audio flinger mixer does not support mono output: configure remote submix HAL in
+            // stereo and let audio flinger do the channel conversion if needed.
+            outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+            inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
+            rSubmixModule->addOutputProfile(address, &outputConfig,
+                    AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
+            rSubmixModule->addInputProfile(address, &inputConfig,
+                    AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
 
-        if (mixes[i].mMixType == MIX_TYPE_PLAYERS) {
-            setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-                                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                                     address.string(), "remote-submix");
-        } else {
-            setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                                     address.string(), "remote-submix");
+            if (mixes[i].mMixType == MIX_TYPE_PLAYERS) {
+                setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+                        AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                        address.string(), "remote-submix");
+            } else {
+                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                        AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                        address.string(), "remote-submix");
+            }
+        } else if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+            String8 address = mixes[i].mDeviceAddress;
+            audio_devices_t device = mixes[i].mDeviceType;
+            ALOGV(" registerPolicyMixes() mix %zu of %zu is RENDER, dev=0x%X addr=%s",
+                    i, mixes.size(), device, address.string());
+
+            bool foundOutput = false;
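+            // a render mix can only be registered on an output already routed to the
+            // requested device: look for an output whose audio patch sinks on a device
+            // port with matching type and address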
+            for (size_t j = 0 ; j < mOutputs.size() ; j++) {
+                sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(j);
+                sp<AudioPatch> patch = mAudioPatches.valueFor(desc->getPatchHandle());
+                if ((patch != 0) && (patch->mPatch.num_sinks != 0)
+                        && (patch->mPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE)
+                        && (patch->mPatch.sinks[0].ext.device.type == device)
+                        && (strncmp(patch->mPatch.sinks[0].ext.device.address, address.string(),
+                                AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
+                    if (mPolicyMixes.registerMix(address, mixes[i], desc) != NO_ERROR) {
+                        res = INVALID_OPERATION;
+                    } else {
+                        foundOutput = true;
+                    }
+                    break;
+                }
+            }
+
+            if (res != NO_ERROR) {
+                ALOGE(" Error registering mix %zu for device 0x%X addr %s",
+                        i, device, address.string());
+                res = INVALID_OPERATION;
+                break;
+            } else if (!foundOutput) {
+                ALOGE(" Output not found for mix %zu for device 0x%X addr %s",
+                        i, device, address.string());
+                res = INVALID_OPERATION;
+                break;
+            }
         }
     }
-    return NO_ERROR;
+    if (res != NO_ERROR) {
+        unregisterPolicyMixes(mixes);
+    }
+    return res;
 }
 
 status_t AudioPolicyManager::unregisterPolicyMixes(Vector<AudioMix> mixes)
 {
-    sp<HwModule> module;
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[i]->mName) == 0 &&
-                mHwModules[i]->mHandle != 0) {
-            module = mHwModules[i];
-            break;
-        }
-    }
-
-    if (module == 0) {
-        return INVALID_OPERATION;
-    }
-
-    ALOGV("unregisterPolicyMixes() num mixes %d", mixes.size());
-
+    ALOGV("unregisterPolicyMixes() num mixes %zu", mixes.size());
+    status_t res = NO_ERROR;
+    sp<HwModule> rSubmixModule;
+    // examine each mix's route type
     for (size_t i = 0; i < mixes.size(); i++) {
-        String8 address = mixes[i].mRegistrationId;
+        if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
 
-        if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
-            continue;
-        }
+            if (rSubmixModule == 0) {
+                for (size_t j = 0; j < mHwModules.size(); j++) {
+                    if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
+                            && mHwModules[j]->mHandle != 0) {
+                        rSubmixModule = mHwModules[j];
+                        break;
+                    }
+                }
+            }
+            if (rSubmixModule == 0) {
+                res = INVALID_OPERATION;
+                continue;
+            }
 
-        if (getDeviceConnectionState(AUDIO_DEVICE_IN_REMOTE_SUBMIX, address.string()) ==
-                                             AUDIO_POLICY_DEVICE_STATE_AVAILABLE)
-        {
-            setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-                                     AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                     address.string(), "remote-submix");
-        }
+            String8 address = mixes[i].mDeviceAddress;
 
-        if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
-                                             AUDIO_POLICY_DEVICE_STATE_AVAILABLE)
-        {
-            setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                     AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                     address.string(), "remote-submix");
+            if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
+                res = INVALID_OPERATION;
+                continue;
+            }
+
+            if (getDeviceConnectionState(AUDIO_DEVICE_IN_REMOTE_SUBMIX, address.string()) ==
+                    AUDIO_POLICY_DEVICE_STATE_AVAILABLE)  {
+                setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+                        AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                        address.string(), "remote-submix");
+            }
+            if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
+                    AUDIO_POLICY_DEVICE_STATE_AVAILABLE)  {
+                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                        AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                        address.string(), "remote-submix");
+            }
+            rSubmixModule->removeOutputProfile(address);
+            rSubmixModule->removeInputProfile(address);
+
+        } else if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+            if (mPolicyMixes.unregisterMix(mixes[i].mDeviceAddress) != NO_ERROR) {
+                res = INVALID_OPERATION;
+                continue;
+            }
         }
-        module->removeOutputProfile(address);
-        module->removeInputProfile(address);
     }
-    return NO_ERROR;
+    return res;
 }
 
 
@@ -2037,17 +2229,22 @@
     snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
             mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
     result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for encoded surround output %d\n",
+            mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND));
+    result.append(buffer);
     snprintf(buffer, SIZE, " TTS output %s\n", mTtsOutputAvailable ? "available" : "not available");
     result.append(buffer);
+    snprintf(buffer, SIZE, " Master mono: %s\n", mMasterMono ? "on" : "off");
+    result.append(buffer);
 
     write(fd, result.string(), result.size());
 
-    mAvailableOutputDevices.dump(fd, String8("output"));
-    mAvailableInputDevices.dump(fd, String8("input"));
+    mAvailableOutputDevices.dump(fd, String8("Available output"));
+    mAvailableInputDevices.dump(fd, String8("Available input"));
     mHwModules.dump(fd);
     mOutputs.dump(fd);
     mInputs.dump(fd);
-    mStreams.dump(fd);
+    mVolumeCurves->dump(fd);
     mEffects.dump(fd);
     mAudioPatches.dump(fd);
 
@@ -2066,6 +2263,10 @@
      offloadInfo.stream_type, offloadInfo.bit_rate, offloadInfo.duration_us,
      offloadInfo.has_video);
 
+    if (mMasterMono) {
+        return false; // no offloading if mono is set.
+    }
+
     // Check if offload has been disabled
     char propValue[PROPERTY_VALUE_MAX];
     if (property_get("audio.offload.disable", propValue, "0")) {
@@ -2141,19 +2342,29 @@
     size_t portsMax = *num_ports;
     *num_ports = 0;
     if (type == AUDIO_PORT_TYPE_NONE || type == AUDIO_PORT_TYPE_DEVICE) {
+        // do not report devices with type AUDIO_DEVICE_IN_STUB or AUDIO_DEVICE_OUT_STUB
+        // as they are used by stub HALs by convention
         if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
-            for (size_t i = 0;
-                    i  < mAvailableOutputDevices.size() && portsWritten < portsMax; i++) {
-                mAvailableOutputDevices[i]->toAudioPort(&ports[portsWritten++]);
+            for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
+                if (mAvailableOutputDevices[i]->type() == AUDIO_DEVICE_OUT_STUB) {
+                    continue;
+                }
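+                // keep counting even once the caller's array is full so *num_ports
+                // reports the total number of available ports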
+                if (portsWritten < portsMax) {
+                    mAvailableOutputDevices[i]->toAudioPort(&ports[portsWritten++]);
+                }
+                (*num_ports)++;
             }
-            *num_ports += mAvailableOutputDevices.size();
         }
         if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
-            for (size_t i = 0;
-                    i  < mAvailableInputDevices.size() && portsWritten < portsMax; i++) {
-                mAvailableInputDevices[i]->toAudioPort(&ports[portsWritten++]);
+            for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+                if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_STUB) {
+                    continue;
+                }
+                if (portsWritten < portsMax) {
+                    mAvailableInputDevices[i]->toAudioPort(&ports[portsWritten++]);
+                }
+                (*num_ports)++;
             }
-            *num_ports += mAvailableInputDevices.size();
         }
     }
     if (type == AUDIO_PORT_TYPE_NONE || type == AUDIO_PORT_TYPE_MIX) {
@@ -2223,7 +2434,7 @@
                                                            patch->sources[0].type);
 #if LOG_NDEBUG == 0
     for (size_t i = 0; i < patch->num_sinks; i++) {
-        ALOGV("createAudioPatch sink %d: id %d role %d type %d", i, patch->sinks[i].id,
+        ALOGV("createAudioPatch sink %zu: id %d role %d type %d", i, patch->sinks[i].id,
                                                              patch->sinks[i].role,
                                                              patch->sinks[i].type);
     }
@@ -2237,7 +2448,7 @@
             return INVALID_OPERATION;
         }
     } else {
-        *handle = 0;
+        *handle = AUDIO_PATCH_HANDLE_NONE;
     }
 
     if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
@@ -2392,7 +2603,7 @@
                // - source and sink devices are on different HW modules OR
                 // - audio HAL version is < 3.0
                 if ((srcDeviceDesc->getModuleHandle() != sinkDeviceDesc->getModuleHandle()) ||
-                        (srcDeviceDesc->mModule->mHalVersion < AUDIO_DEVICE_API_VERSION_3_0)) {
+                        (srcDeviceDesc->mModule->getHalVersion() < AUDIO_DEVICE_API_VERSION_3_0)) {
                     // support only one sink device for now to simplify output selection logic
                     if (patch->num_sinks > 1) {
                         return INVALID_OPERATION;
@@ -2588,6 +2799,7 @@
 
 void AudioPolicyManager::releaseResourcesForUid(uid_t uid)
 {
+    clearAudioSources(uid);
     clearAudioPatches(uid);
     clearSessionRoutes(uid);
 }
@@ -2602,7 +2814,6 @@
     }
 }
 
-
 void AudioPolicyManager::checkStrategyRoute(routing_strategy strategy,
                                             audio_io_handle_t ouptutToSkip)
 {
@@ -2620,10 +2831,7 @@
         // invalidate all tracks in this strategy to force re connection.
         // Otherwise select new device on the output mix.
         if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
-            for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-                if (stream == AUDIO_STREAM_PATCH) {
-                    continue;
-                }
+            for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
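+                // AUDIO_STREAM_FOR_POLICY_CNT stops before AUDIO_STREAM_PATCH, so the
+                // internal patch stream no longer needs an explicit skip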
                 if (getStrategy((audio_stream_type_t)stream) == strategy) {
                     mpClientInterface->invalidateStream((audio_stream_type_t)stream);
                 }
@@ -2668,7 +2876,7 @@
     SortedVector<audio_io_handle_t> inputsToClose;
     for (size_t i = 0; i < mInputs.size(); i++) {
         sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(i);
-        if (affectedSources.indexOf(inputDesc->mInputSource) >= 0) {
+        if (affectedSources.indexOf(inputDesc->inputSource()) >= 0) {
             inputsToClose.add(inputDesc->mIoHandle);
         }
     }
@@ -2677,28 +2885,240 @@
     }
 }
 
+void AudioPolicyManager::clearAudioSources(uid_t uid)
+{
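+    // iterate backwards: stopAudioSource() removes the entry from mAudioSources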
+    for (ssize_t i = (ssize_t)mAudioSources.size() - 1; i >= 0; i--)  {
+        sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        if (sourceDesc->mUid == uid) {
+            stopAudioSource(mAudioSources.keyAt(i));
+        }
+    }
+}
 
 status_t AudioPolicyManager::acquireSoundTriggerSession(audio_session_t *session,
                                        audio_io_handle_t *ioHandle,
                                        audio_devices_t *device)
 {
-    *session = (audio_session_t)mpClientInterface->newAudioUniqueId();
-    *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId();
+    *session = (audio_session_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+    *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_INPUT);
     *device = getDeviceAndMixForInputSource(AUDIO_SOURCE_HOTWORD);
 
     return mSoundTriggerSessions.acquireSession(*session, *ioHandle);
 }
 
-status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source __unused,
-                                       const audio_attributes_t *attributes __unused,
-                                       audio_io_handle_t *handle __unused)
+status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source,
+                                  const audio_attributes_t *attributes,
+                                  audio_io_handle_t *handle,
+                                  uid_t uid)
 {
-    return INVALID_OPERATION;
+    ALOGV("%s source %p attributes %p handle %p", __FUNCTION__, source, attributes, handle);
+    if (source == NULL || attributes == NULL || handle == NULL) {
+        return BAD_VALUE;
+    }
+
+    *handle = AUDIO_IO_HANDLE_NONE;
+
+    if (source->role != AUDIO_PORT_ROLE_SOURCE ||
+            source->type != AUDIO_PORT_TYPE_DEVICE) {
+        ALOGV("%s INVALID_OPERATION source->role %d source->type %d", __FUNCTION__, source->role, source->type);
+        return INVALID_OPERATION;
+    }
+
+    sp<DeviceDescriptor> srcDeviceDesc =
+            mAvailableInputDevices.getDevice(source->ext.device.type,
+                                              String8(source->ext.device.address));
+    if (srcDeviceDesc == 0) {
+        ALOGV("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
+        return BAD_VALUE;
+    }
+    sp<AudioSourceDescriptor> sourceDesc =
+            new AudioSourceDescriptor(srcDeviceDesc, attributes, uid);
+
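+    // create the patch descriptor now; connectAudioSource() fills in the actual
+    // audio_patch and the AudioFlinger patch handle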
+    struct audio_patch dummyPatch;
+    sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
+    sourceDesc->mPatchDesc = patchDesc;
+
+    status_t status = connectAudioSource(sourceDesc);
+    if (status == NO_ERROR) {
+        mAudioSources.add(sourceDesc->getHandle(), sourceDesc);
+        *handle = sourceDesc->getHandle();
+    }
+    return status;
+}
+
+status_t AudioPolicyManager::connectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
+{
+    ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
+
+    // make sure we only have one patch per source.
+    disconnectAudioSource(sourceDesc);
+
+    routing_strategy strategy = (routing_strategy) getStrategyForAttr(&sourceDesc->mAttributes);
+    audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
+    sp<DeviceDescriptor> srcDeviceDesc = sourceDesc->mDevice;
+
+    audio_devices_t sinkDevice = getDeviceForStrategy(strategy, true);
+    sp<DeviceDescriptor> sinkDeviceDesc =
+            mAvailableOutputDevices.getDevice(sinkDevice, String8(""));
+
+    audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+    struct audio_patch *patch = &sourceDesc->mPatchDesc->mPatch;
+
+    if (srcDeviceDesc->getAudioPort()->mModule->getHandle() ==
+            sinkDeviceDesc->getAudioPort()->mModule->getHandle() &&
+            srcDeviceDesc->getAudioPort()->mModule->getHalVersion() >= AUDIO_DEVICE_API_VERSION_3_0 &&
+            srcDeviceDesc->getAudioPort()->mGains.size() > 0) {
+        ALOGV("%s AUDIO_DEVICE_API_VERSION_3_0", __FUNCTION__);
+        //   create patch between src device and output device
+        //   create a HwAudioOutputDescriptor and add it to mHwOutputs
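+        // (not implemented yet: execution falls through and the patch descriptor is
+        // registered below with an AUDIO_PATCH_HANDLE_NONE AudioFlinger handle)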
+    } else {
+        SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(sinkDevice, mOutputs);
+        audio_io_handle_t output =
+                selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
+        if (output == AUDIO_IO_HANDLE_NONE) {
+            ALOGV("%s no output for device %08x", __FUNCTION__, sinkDevice);
+            return INVALID_OPERATION;
+        }
+        sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+        if (outputDesc->isDuplicated()) {
+            ALOGV("%s output for device %08x is duplicated", __FUNCTION__, sinkDevice);
+            return INVALID_OPERATION;
+        }
+        // create a special patch with no sink and two sources:
+        // - the second source indicates to PatchPanel through which output mix this patch should
+        // be connected as well as the stream type for volume control
+        // - the sink is defined by whatever output device is currently selected for the output
+        // through which this patch is routed.
+        patch->num_sinks = 0;
+        patch->num_sources = 2;
+        srcDeviceDesc->toAudioPortConfig(&patch->sources[0], NULL);
+        outputDesc->toAudioPortConfig(&patch->sources[1], NULL);
+        patch->sources[1].ext.mix.usecase.stream = stream;
+        status_t status = mpClientInterface->createAudioPatch(patch,
+                                                              &afPatchHandle,
+                                                              0);
+        ALOGV("%s patch panel returned %d patchHandle %d", __FUNCTION__,
+                                                              status, afPatchHandle);
+        if (status != NO_ERROR) {
+            ALOGW("%s patch panel could not connect device patch, error %d",
+                  __FUNCTION__, status);
+            return INVALID_OPERATION;
+        }
+        uint32_t delayMs = 0;
+        status = startSource(outputDesc, stream, sinkDevice, NULL, &delayMs);
+
+        if (status != NO_ERROR) {
+            mpClientInterface->releaseAudioPatch(sourceDesc->mPatchDesc->mAfPatchHandle, 0);
+            return status;
+        }
+        sourceDesc->mSwOutput = outputDesc;
+        if (delayMs != 0) {
+            usleep(delayMs * 1000);
+        }
+    }
+
+    sourceDesc->mPatchDesc->mAfPatchHandle = afPatchHandle;
+    addAudioPatch(sourceDesc->mPatchDesc->mHandle, sourceDesc->mPatchDesc);
+
+    return NO_ERROR;
 }
 
-status_t AudioPolicyManager::stopAudioSource(audio_io_handle_t handle __unused)
+status_t AudioPolicyManager::stopAudioSource(audio_io_handle_t handle)
 {
-    return INVALID_OPERATION;
+    sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueFor(handle);
+    ALOGV("%s handle %d", __FUNCTION__, handle);
+    if (sourceDesc == 0) {
+        ALOGW("%s unknown source for handle %d", __FUNCTION__, handle);
+        return BAD_VALUE;
+    }
+    status_t status = disconnectAudioSource(sourceDesc);
+
+    mAudioSources.removeItem(handle);
+    return status;
+}
+
+status_t AudioPolicyManager::setMasterMono(bool mono)
+{
+    if (mMasterMono == mono) {
+        return NO_ERROR;
+    }
+    mMasterMono = mono;
+    // if enabling mono we close all offloaded devices, which will invalidate the
+    // corresponding AudioTrack. The AudioTrack client/MediaPlayer is responsible
+    // for recreating the new AudioTrack as non-offloaded PCM.
+    //
+    // If disabling mono, we leave all tracks as is: we don't know which clients
+    // and tracks are able to be recreated as offloaded. The next "song" should
+    // play back offloaded.
+    if (mMasterMono) {
+        Vector<audio_io_handle_t> offloaded;
+        for (size_t i = 0; i < mOutputs.size(); ++i) {
+            sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            if (desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+                offloaded.push(desc->mIoHandle);
+            }
+        }
+        for (size_t i = 0; i < offloaded.size(); ++i) {
+            closeOutput(offloaded[i]);
+        }
+    }
+    // update master mono for all remaining outputs
+    for (size_t i = 0; i < mOutputs.size(); ++i) {
+        updateMono(mOutputs.keyAt(i));
+    }
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::getMasterMono(bool *mono)
+{
+    *mono = mMasterMono;
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
+{
+    ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
+
+    sp<AudioPatch> patchDesc = mAudioPatches.valueFor(sourceDesc->mPatchDesc->mHandle);
+    if (patchDesc == 0) {
+        ALOGW("%s source has no patch with handle %d", __FUNCTION__,
+              sourceDesc->mPatchDesc->mHandle);
+        return BAD_VALUE;
+    }
+    removeAudioPatch(sourceDesc->mPatchDesc->mHandle);
+
+    audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
+    sp<SwAudioOutputDescriptor> swOutputDesc = sourceDesc->mSwOutput.promote();
+    if (swOutputDesc != 0) {
+        stopSource(swOutputDesc, stream, false);
+        mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+    } else {
+        sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->mHwOutput.promote();
+        if (hwOutputDesc != 0) {
+          //   release patch between src device and output device
+          //   close the HwAudioOutputDescriptor and remove it from mHwOutputs
+        } else {
+            ALOGW("%s source has neither SW nor HW output", __FUNCTION__);
+        }
+    }
+    return NO_ERROR;
+}
+
+sp<AudioSourceDescriptor> AudioPolicyManager::getSourceForStrategyOnOutput(
+        audio_io_handle_t output, routing_strategy strategy)
+{
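+    // find a started audio source whose strategy is currently rendered on this output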
+    sp<AudioSourceDescriptor> source;
+    for (size_t i = 0; i < mAudioSources.size(); i++)  {
+        sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        routing_strategy sourceStrategy =
+                (routing_strategy) getStrategyForAttr(&sourceDesc->mAttributes);
+        sp<SwAudioOutputDescriptor> outputDesc = sourceDesc->mSwOutput.promote();
+        if (sourceStrategy == strategy && outputDesc != 0 && outputDesc->mIoHandle == output) {
+            source = sourceDesc;
+            break;
+        }
+    }
+    return source;
 }
 
 // ----------------------------------------------------------------------------
@@ -2716,13 +3136,42 @@
 #endif //AUDIO_POLICY_TEST
     mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
     mA2dpSuspended(false),
-    mSpeakerDrcEnabled(false),
     mAudioPortGeneration(1),
     mBeaconMuteRefCount(0),
     mBeaconPlayingRefCount(0),
     mBeaconMuted(false),
-    mTtsOutputAvailable(false)
+    mTtsOutputAvailable(false),
+    mMasterMono(false)
 {
+    mUidCached = getuid();
+    mpClientInterface = clientInterface;
+
+    // TODO: remove when legacy conf file is removed. true on devices that use DRC on the
+    // DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
+    // Note: remove also speaker_drc_enabled from global configuration of XML config file.
+    bool speakerDrcEnabled = false;
+
+#ifdef USE_XML_AUDIO_POLICY_CONF
+    mVolumeCurves = new VolumeCurvesCollection();
+    AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
+                             mDefaultOutputDevice, speakerDrcEnabled,
+                             static_cast<VolumeCurvesCollection *>(mVolumeCurves));
+    PolicySerializer serializer;
+    if (serializer.deserialize(AUDIO_POLICY_XML_CONFIG_FILE, config) != NO_ERROR) {
+#else
+    mVolumeCurves = new StreamDescriptorCollection();
+    AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
+                             mDefaultOutputDevice, speakerDrcEnabled);
+    if ((ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, config) != NO_ERROR) &&
+            (ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, config) != NO_ERROR)) {
+#endif
+        ALOGE("could not load audio policy configuration file, setting defaults");
+        config.setDefault();
+    }
+    // must be done after reading the policy (since conditioned by Speaker DRC enabling)
+    mVolumeCurves->initializeVolumeCurves(speakerDrcEnabled);
+
+    // Once policy config has been parsed, retrieve an instance of the engine and initialize it.
     audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
     if (!engineInstance) {
         ALOGE("%s:  Could not get an instance of policy engine", __FUNCTION__);
@@ -2738,32 +3187,14 @@
     status_t status = mEngine->initCheck();
     ALOG_ASSERT(status == NO_ERROR, "Policy engine not initialized(err=%d)", status);
 
-    mUidCached = getuid();
-    mpClientInterface = clientInterface;
-
-    mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
-    if (ConfigParsingUtils::loadAudioPolicyConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE,
-                 mHwModules, mAvailableInputDevices, mAvailableOutputDevices,
-                 mDefaultOutputDevice, mSpeakerDrcEnabled) != NO_ERROR) {
-        if (ConfigParsingUtils::loadAudioPolicyConfig(AUDIO_POLICY_CONFIG_FILE,
-                                  mHwModules, mAvailableInputDevices, mAvailableOutputDevices,
-                                  mDefaultOutputDevice, mSpeakerDrcEnabled) != NO_ERROR) {
-            ALOGE("could not load audio policy configuration file, setting defaults");
-            defaultAudioPolicyConfig();
-        }
-    }
     // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
-
-    // must be done after reading the policy (since conditionned by Speaker Drc Enabling)
-    mEngine->initializeVolumeCurves(mSpeakerDrcEnabled);
-
     // open all output streams needed to access attached devices
     audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
     audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
     for (size_t i = 0; i < mHwModules.size(); i++) {
-        mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->mName);
+        mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->getName());
         if (mHwModules[i]->mHandle == 0) {
-            ALOGW("could not open HW module %s", mHwModules[i]->mName);
+            ALOGW("could not open HW module %s", mHwModules[i]->getName());
             continue;
         }
         // open all output streams needed to access attached devices
@@ -2774,35 +3205,34 @@
         {
             const sp<IOProfile> outProfile = mHwModules[i]->mOutputProfiles[j];
 
-            if (outProfile->mSupportedDevices.isEmpty()) {
-                ALOGW("Output profile contains no device on module %s", mHwModules[i]->mName);
+            if (!outProfile->hasSupportedDevices()) {
+                ALOGW("Output profile contains no device on module %s", mHwModules[i]->getName());
                 continue;
             }
-            if ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_TTS) != 0) {
+            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_TTS) != 0) {
                 mTtsOutputAvailable = true;
             }
 
-            if ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+            if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
                 continue;
             }
-            audio_devices_t profileType = outProfile->mSupportedDevices.types();
+            audio_devices_t profileType = outProfile->getSupportedDevicesType();
             if ((profileType & mDefaultOutputDevice->type()) != AUDIO_DEVICE_NONE) {
                 profileType = mDefaultOutputDevice->type();
             } else {
-                // chose first device present in mSupportedDevices also part of
+                // choose first device present in profile's SupportedDevices also part of
                 // outputDeviceTypes
-                for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
-                    profileType = outProfile->mSupportedDevices[k]->type();
-                    if ((profileType & outputDeviceTypes) != 0) {
-                        break;
-                    }
-                }
+                profileType = outProfile->getSupportedDeviceForType(outputDeviceTypes);
             }
             if ((profileType & outputDeviceTypes) == 0) {
                 continue;
             }
             sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
                                                                                  mpClientInterface);
+            const DeviceVector &supportedDevices = outProfile->getSupportedDevices();
+            const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
+            String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
+                    : String8("");
 
             outputDesc->mDevice = profileType;
             audio_config_t config = AUDIO_CONFIG_INITIALIZER;
@@ -2814,36 +3244,37 @@
                                                             &output,
                                                             &config,
                                                             &outputDesc->mDevice,
-                                                            String8(""),
+                                                            address,
                                                             &outputDesc->mLatency,
                                                             outputDesc->mFlags);
 
             if (status != NO_ERROR) {
                 ALOGW("Cannot open output stream for device %08x on hw module %s",
                       outputDesc->mDevice,
-                      mHwModules[i]->mName);
+                      mHwModules[i]->getName());
             } else {
                 outputDesc->mSamplingRate = config.sample_rate;
                 outputDesc->mChannelMask = config.channel_mask;
                 outputDesc->mFormat = config.format;
 
-                for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
-                    audio_devices_t type = outProfile->mSupportedDevices[k]->type();
-                    ssize_t index =
-                            mAvailableOutputDevices.indexOf(outProfile->mSupportedDevices[k]);
+                for (size_t k = 0; k  < supportedDevices.size(); k++) {
+                    ssize_t index = mAvailableOutputDevices.indexOf(supportedDevices[k]);
                     // give a valid ID to an attached device once confirmed it is reachable
                     if (index >= 0 && !mAvailableOutputDevices[index]->isAttached()) {
                         mAvailableOutputDevices[index]->attach(mHwModules[i]);
                     }
                 }
                 if (mPrimaryOutput == 0 &&
-                        outProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
+                        outProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
                     mPrimaryOutput = outputDesc;
                 }
                 addOutput(output, outputDesc);
                 setOutputDevice(outputDesc,
                                 outputDesc->mDevice,
-                                true);
+                                true,
+                                0,
+                                NULL,
+                                address.string());
             }
         }
         // open input streams needed to access attached devices to validate
@@ -2852,25 +3283,20 @@
         {
             const sp<IOProfile> inProfile = mHwModules[i]->mInputProfiles[j];
 
-            if (inProfile->mSupportedDevices.isEmpty()) {
-                ALOGW("Input profile contains no device on module %s", mHwModules[i]->mName);
+            if (!inProfile->hasSupportedDevices()) {
+                ALOGW("Input profile contains no device on module %s", mHwModules[i]->getName());
                 continue;
             }
-            // chose first device present in mSupportedDevices also part of
+            // choose first device present in profile's SupportedDevices also part of
             // inputDeviceTypes
-            audio_devices_t profileType = AUDIO_DEVICE_NONE;
-            for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
-                profileType = inProfile->mSupportedDevices[k]->type();
-                if (profileType & inputDeviceTypes) {
-                    break;
-                }
-            }
+            audio_devices_t profileType = inProfile->getSupportedDeviceForType(inputDeviceTypes);
+
             if ((profileType & inputDeviceTypes) == 0) {
                 continue;
             }
-            sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(inProfile);
+            sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(inProfile);
 
-            inputDesc->mInputSource = AUDIO_SOURCE_MIC;
             inputDesc->mDevice = profileType;
 
             // find the address
@@ -2895,10 +3321,9 @@
                                                            AUDIO_INPUT_FLAG_NONE);
 
             if (status == NO_ERROR) {
-                for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
-                    audio_devices_t type = inProfile->mSupportedDevices[k]->type();
-                    ssize_t index =
-                            mAvailableInputDevices.indexOf(inProfile->mSupportedDevices[k]);
+                const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
+                for (size_t k = 0; k  < supportedDevices.size(); k++) {
+                    ssize_t index =  mAvailableInputDevices.indexOf(supportedDevices[k]);
                     // give a valid ID to an attached device once confirmed it is reachable
                     if (index >= 0) {
                         sp<DeviceDescriptor> devDesc = mAvailableInputDevices[index];
@@ -2912,14 +3337,14 @@
             } else {
                 ALOGW("Cannot open input stream for device %08x on hw module %s",
                       inputDesc->mDevice,
-                      mHwModules[i]->mName);
+                      mHwModules[i]->getName());
             }
         }
     }
     // make sure all attached devices have been allocated a unique ID
     for (size_t i = 0; i  < mAvailableOutputDevices.size();) {
         if (!mAvailableOutputDevices[i]->isAttached()) {
-            ALOGW("Input device %08x unreachable", mAvailableOutputDevices[i]->type());
+            ALOGW("Output device %08x unreachable", mAvailableOutputDevices[i]->type());
             mAvailableOutputDevices.remove(mAvailableOutputDevices[i]);
             continue;
         }
@@ -2940,7 +3365,7 @@
         i++;
     }
     // make sure default device is reachable
-    if (mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) {
+    if (mDefaultOutputDevice == 0 || mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) {
         ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->type());
     }
 
@@ -3160,6 +3585,7 @@
 {
     outputDesc->setIoHandle(output);
     mOutputs.add(output, outputDesc);
+    updateMono(output); // update mono status when adding to output list
     nextAudioPortGeneration();
 }
 
@@ -3180,7 +3606,7 @@
         const String8 address /*in*/,
         SortedVector<audio_io_handle_t>& outputs /*out*/) {
     sp<DeviceDescriptor> devDesc =
-        desc->mProfile->mSupportedDevices.getDevice(device, address);
+        desc->mProfile->getSupportedDeviceByAddress(device, address);
     if (devDesc != 0) {
         ALOGV("findIoHandlesByAddress(): adding opened output %d on same address %s",
               desc->mIoHandle, address.string());
@@ -3198,7 +3624,7 @@
 
     if (audio_device_is_digital(device)) {
         // erase all current sample rates, formats and channel masks
-        devDesc->clearCapabilities();
+        devDesc->clearAudioProfiles();
     }
 
     if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
@@ -3225,9 +3651,9 @@
             for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
             {
                 sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
-                if (profile->mSupportedDevices.types() & device) {
+                if (profile->supportDevice(device)) {
                     if (!device_distinguishes_on_address(device) ||
-                            address == profile->mSupportedDevices[0]->mAddress) {
+                            profile->supportDeviceAddress(address)) {
                         profiles.add(profile);
                         ALOGV("checkOutputsForDevice(): adding profile %zu from module %zu", j, i);
                     }
@@ -3235,7 +3661,7 @@
             }
         }
 
-        ALOGV("  found %d profiles, %d outputs", profiles.size(), outputs.size());
+        ALOGV("  found %zu profiles, %zu outputs", profiles.size(), outputs.size());
 
         if (profiles.isEmpty() && outputs.isEmpty()) {
             ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
@@ -3294,55 +3720,15 @@
                     mpClientInterface->setParameters(output, String8(param));
                     free(param);
                 }
-
-                // Here is where we step through and resolve any "dynamic" fields
-                String8 reply;
-                char *value;
-                if (profile->mSamplingRates[0] == 0) {
-                    reply = mpClientInterface->getParameters(output,
-                                            String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES));
-                    ALOGV("checkOutputsForDevice() supported sampling rates %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadSamplingRates(value + 1);
-                    }
-                }
-                if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
-                    reply = mpClientInterface->getParameters(output,
-                                                   String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
-                    ALOGV("checkOutputsForDevice() supported formats %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadFormats(value + 1);
-                    }
-                }
-                if (profile->mChannelMasks[0] == 0) {
-                    reply = mpClientInterface->getParameters(output,
-                                                  String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS));
-                    ALOGV("checkOutputsForDevice() supported channel masks %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadOutChannels(value + 1);
-                    }
-                }
-                if (((profile->mSamplingRates[0] == 0) &&
-                         (profile->mSamplingRates.size() < 2)) ||
-                     ((profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) &&
-                         (profile->mFormats.size() < 2)) ||
-                     ((profile->mChannelMasks[0] == 0) &&
-                         (profile->mChannelMasks.size() < 2))) {
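+                // updateAudioProfiles() queries the HAL for the dynamic sample rates,
+                // formats and channel masks that were previously parsed inline here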
+                updateAudioProfiles(device, output, profile->getAudioProfiles());
+                if (!profile->hasValidAudioProfile()) {
                     ALOGW("checkOutputsForDevice() missing param");
                     mpClientInterface->closeOutput(output);
                     output = AUDIO_IO_HANDLE_NONE;
-                } else if (profile->mSamplingRates[0] == 0 || profile->mFormats[0] == 0 ||
-                            profile->mChannelMasks[0] == 0) {
+                } else if (profile->hasDynamicAudioProfile()) {
                     mpClientInterface->closeOutput(output);
-                    config.sample_rate = profile->pickSamplingRate();
-                    config.channel_mask = profile->pickChannelMask();
-                    config.format = profile->pickFormat();
+                    output = AUDIO_IO_HANDLE_NONE;
+                    profile->pickAudioProfile(config.sample_rate, config.channel_mask, config.format);
                     config.offload_info.sample_rate = config.sample_rate;
                     config.offload_info.channel_mask = config.channel_mask;
                     config.offload_info.format = config.format;
@@ -3463,21 +3849,10 @@
             for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
             {
                 sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
-                if (profile->mSupportedDevices.types() & device) {
+                if (profile->supportDevice(device)) {
                     ALOGV("checkOutputsForDevice(): "
                             "clearing direct output profile %zu on module %zu", j, i);
-                    if (profile->mSamplingRates[0] == 0) {
-                        profile->mSamplingRates.clear();
-                        profile->mSamplingRates.add(0);
-                    }
-                    if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
-                        profile->mFormats.clear();
-                        profile->mFormats.add(AUDIO_FORMAT_DEFAULT);
-                    }
-                    if (profile->mChannelMasks[0] == 0) {
-                        profile->mChannelMasks.clear();
-                        profile->mChannelMasks.add(0);
-                    }
+                    profile->clearAudioProfiles();
                 }
             }
         }
@@ -3495,14 +3870,14 @@
 
     if (audio_device_is_digital(device)) {
         // erase all current sample rates, formats and channel masks
-        devDesc->clearCapabilities();
+        devDesc->clearAudioProfiles();
     }
 
     if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
         // first list already open inputs that can be routed to this device
         for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
             desc = mInputs.valueAt(input_index);
-            if (desc->mProfile->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) {
+            if (desc->mProfile->supportDevice(device)) {
                 ALOGV("checkInputsForDevice(): adding opened input %d", mInputs.keyAt(input_index));
                inputs.add(mInputs.keyAt(input_index));
             }
@@ -3521,9 +3896,9 @@
             {
                 sp<IOProfile> profile = mHwModules[module_idx]->mInputProfiles[profile_index];
 
-                if (profile->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) {
+                if (profile->supportDevice(device)) {
                     if (!device_distinguishes_on_address(device) ||
-                            address == profile->mSupportedDevices[0]->mAddress) {
+                            profile->supportDeviceAddress(address)) {
                         profiles.add(profile);
                         ALOGV("checkInputsForDevice(): adding profile %zu from module %zu",
                               profile_index, module_idx);
@@ -3583,42 +3958,8 @@
                     mpClientInterface->setParameters(input, String8(param));
                     free(param);
                 }
-
-                // Here is where we step through and resolve any "dynamic" fields
-                String8 reply;
-                char *value;
-                if (profile->mSamplingRates[0] == 0) {
-                    reply = mpClientInterface->getParameters(input,
-                                            String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES));
-                    ALOGV("checkInputsForDevice() direct input sup sampling rates %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadSamplingRates(value + 1);
-                    }
-                }
-                if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
-                    reply = mpClientInterface->getParameters(input,
-                                                   String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
-                    ALOGV("checkInputsForDevice() direct input sup formats %s", reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadFormats(value + 1);
-                    }
-                }
-                if (profile->mChannelMasks[0] == 0) {
-                    reply = mpClientInterface->getParameters(input,
-                                                  String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS));
-                    ALOGV("checkInputsForDevice() direct input sup channel masks %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadInChannels(value + 1);
-                    }
-                }
-                if (((profile->mSamplingRates[0] == 0) && (profile->mSamplingRates.size() < 2)) ||
-                     ((profile->mFormats[0] == 0) && (profile->mFormats.size() < 2)) ||
-                     ((profile->mChannelMasks[0] == 0) && (profile->mChannelMasks.size() < 2))) {
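+                // as for outputs, updateAudioProfiles() resolves dynamic rates, formats
+                // and channel masks by querying the HAL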
+                updateAudioProfiles(device, input, profile->getAudioProfiles());
+                if (!profile->hasValidAudioProfile()) {
                     ALOGW("checkInputsForDevice() direct input missing param");
                     mpClientInterface->closeInput(input);
                     input = AUDIO_IO_HANDLE_NONE;
@@ -3651,8 +3992,7 @@
         // check if one opened input is not needed any more after disconnecting one device
         for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
             desc = mInputs.valueAt(input_index);
-            if (!(desc->mProfile->mSupportedDevices.types() & mAvailableInputDevices.types() &
-                    ~AUDIO_DEVICE_BIT_IN)) {
+            if (!(desc->mProfile->supportDevice(mAvailableInputDevices.types()))) {
                 ALOGV("checkInputsForDevice(): disconnecting adding input %d",
                       mInputs.keyAt(input_index));
                 inputs.add(mInputs.keyAt(input_index));
@@ -3667,21 +4007,10 @@
                  profile_index < mHwModules[module_index]->mInputProfiles.size();
                  profile_index++) {
                 sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index];
-                if (profile->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) {
+                if (profile->supportDevice(device)) {
                     ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu",
                           profile_index, module_index);
-                    if (profile->mSamplingRates[0] == 0) {
-                        profile->mSamplingRates.clear();
-                        profile->mSamplingRates.add(0);
-                    }
-                    if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
-                        profile->mFormats.clear();
-                        profile->mFormats.add(AUDIO_FORMAT_DEFAULT);
-                    }
-                    if (profile->mChannelMasks[0] == 0) {
-                        profile->mChannelMasks.clear();
-                        profile->mChannelMasks.add(0);
-                    }
+                    profile->clearAudioProfiles();
                 }
             }
         }
@@ -3732,7 +4061,7 @@
 
     nextAudioPortGeneration();
 
-    ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+    ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
         status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
@@ -3761,7 +4090,7 @@
 
     nextAudioPortGeneration();
 
-    ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+    ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
         status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
@@ -3841,6 +4170,11 @@
                 setStrategyMute(strategy, true, desc);
                 setStrategyMute(strategy, false, desc, MUTE_TIME_MS, newDevice);
             }
+            sp<AudioSourceDescriptor> source =
+                    getSourceForStrategyOnOutput(srcOutputs[i], strategy);
+            if (source != 0) {
+                connectAudioSource(source);
+            }
         }
 
         // Move effects associated to this strategy from previous output to new output
@@ -3863,10 +4197,7 @@
             }
         }
         // Move tracks associated to this strategy from previous output to new output
-        for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
-            if (i == AUDIO_STREAM_PATCH) {
-                continue;
-            }
+        for (int i = 0; i < AUDIO_STREAM_FOR_POLICY_CNT; i++) {
             if (getStrategy((audio_stream_type_t)i) == strategy) {
                 mpClientInterface->invalidateStream((audio_stream_type_t)i);
             }
@@ -3941,12 +4272,12 @@
 {
     audio_devices_t device = AUDIO_DEVICE_NONE;
 
-    ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+    ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
         if (patchDesc->mUid != mUidCached) {
             ALOGV("getNewOutputDevice() device %08x forced by patch %d",
-                  outputDesc->device(), outputDesc->mPatchHandle);
+                  outputDesc->device(), outputDesc->getPatchHandle());
             return outputDesc->device();
         }
     }
@@ -3958,10 +4289,10 @@
     //      use device for strategy phone
     // 3: the strategy for enforced audible is active but not enforced on the output:
     //      use the device for strategy enforced audible
-    // 4: the strategy accessibility is active on the output:
-    //      use device for strategy accessibility
-    // 5: the strategy sonification is active on the output:
+    // 4: the strategy sonification is active on the output:
     //      use device for strategy sonification
+    // 5: the strategy accessibility is active on the output:
+    //      use device for strategy accessibility
     // 6: the strategy "respectful" sonification is active on the output:
     //      use device for strategy "respectful" sonification
     // 7: the strategy media is active on the output:
@@ -3978,10 +4309,10 @@
         device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE)) {
         device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
-    } else if (isStrategyActive(outputDesc, STRATEGY_ACCESSIBILITY)) {
-        device = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION)) {
         device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
+    } else if (isStrategyActive(outputDesc, STRATEGY_ACCESSIBILITY)) {
+        device = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION_RESPECTFUL)) {
         device = getDeviceForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
     } else if (isStrategyActive(outputDesc, STRATEGY_MEDIA)) {
@@ -4002,21 +4333,28 @@
 {
     sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
 
-    ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+    ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
         if (patchDesc->mUid != mUidCached) {
             ALOGV("getNewInputDevice() device %08x forced by patch %d",
-                  inputDesc->mDevice, inputDesc->mPatchHandle);
+                  inputDesc->mDevice, inputDesc->getPatchHandle());
             return inputDesc->mDevice;
         }
     }
 
-    audio_devices_t device = getDeviceAndMixForInputSource(inputDesc->mInputSource);
+    audio_devices_t device = getDeviceAndMixForInputSource(inputDesc->inputSource());
 
     return device;
 }
 
+bool AudioPolicyManager::streamsMatchForvolume(audio_stream_type_t stream1,
+                                               audio_stream_type_t stream2) {
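+    // AUDIO_STREAM_ACCESSIBILITY follows the AUDIO_STREAM_MUSIC volume, so the two
+    // streams are treated as equivalent when resolving devices for volume.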
+    return ((stream1 == stream2) ||
+            ((stream1 == AUDIO_STREAM_ACCESSIBILITY) && (stream2 == AUDIO_STREAM_MUSIC)) ||
+            ((stream1 == AUDIO_STREAM_MUSIC) && (stream2 == AUDIO_STREAM_ACCESSIBILITY)));
+}
+
 uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
     return (uint32_t)getStrategy(stream);
 }
@@ -4028,16 +4366,22 @@
     if (stream < (audio_stream_type_t) 0 || stream >= AUDIO_STREAM_PUBLIC_CNT) {
         return AUDIO_DEVICE_NONE;
     }
-    audio_devices_t devices;
-    routing_strategy strategy = getStrategy(stream);
-    devices = getDeviceForStrategy(strategy, true /*fromCache*/);
-    SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(devices, mOutputs);
-    for (size_t i = 0; i < outputs.size(); i++) {
-        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
-        if (isStrategyActive(outputDesc, strategy)) {
-            devices = outputDesc->device();
-            break;
+    audio_devices_t devices = AUDIO_DEVICE_NONE;
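+    // Gather devices across every stream sharing volume with the requested one:
+    // start from the strategy's cached device, then add the devices of any output
+    // where that stream is currently active.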
+    for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+        if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+            continue;
         }
+        routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
+        audio_devices_t curDevices =
+                getDeviceForStrategy((routing_strategy)curStrategy, true /*fromCache*/);
+        SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(curDevices, mOutputs);
+        for (size_t i = 0; i < outputs.size(); i++) {
+            sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+            if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
+                curDevices |= outputDesc->device();
+            }
+        }
+        devices |= curDevices;
     }
 
     /*Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
@@ -4046,7 +4390,6 @@
         devices |= AUDIO_DEVICE_OUT_SPEAKER;
         devices &= ~AUDIO_DEVICE_OUT_SPEAKER_SAFE;
     }
-
     return devices;
 }
 
@@ -4150,15 +4493,8 @@
     // the device = the device from the descriptor in the RouteMap, and exit.
     for (size_t routeIndex = 0; routeIndex < mOutputRoutes.size(); routeIndex++) {
         sp<SessionRoute> route = mOutputRoutes.valueAt(routeIndex);
-        routing_strategy strat = getStrategy(route->mStreamType);
-        // Special case for accessibility strategy which must follow any strategy it is
-        // currently remapped to
-        bool strategyMatch = (strat == strategy) ||
-                             ((strategy == STRATEGY_ACCESSIBILITY) &&
-                              ((mEngine->getStrategyForUsage(
-                                      AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY) == strat) ||
-                               (strat == STRATEGY_MEDIA)));
-        if (strategyMatch && route->isActive()) {
+        routing_strategy routeStrategy = getStrategy(route->mStreamType);
+        if ((routeStrategy == strategy) && route->isActive()) {
             return route->mDeviceDescriptor->type();
         }
     }
@@ -4302,7 +4638,7 @@
     // Doing this check here allows the caller to call setOutputDevice() without conditions
     if ((device == AUDIO_DEVICE_NONE || device == prevDevice) &&
         !force &&
-        outputDesc->mPatchHandle != 0) {
+        outputDesc->getPatchHandle() != 0) {
         ALOGV("setOutputDevice() setting same device 0x%04x or null device", device);
         return muteWaitMs;
     }
@@ -4313,9 +4649,13 @@
     if (device == AUDIO_DEVICE_NONE) {
         resetOutputDevice(outputDesc, delayMs, NULL);
     } else {
-        DeviceVector deviceList = (address == NULL) ?
-                mAvailableOutputDevices.getDevicesFromType(device)
-                : mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
+        DeviceVector deviceList;
+        if ((address == NULL) || (strlen(address) == 0)) {
+            deviceList = mAvailableOutputDevices.getDevicesFromType(device);
+        } else {
+            deviceList = mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
+        }
+
         if (!deviceList.isEmpty()) {
             struct audio_patch patch;
             outputDesc->toAudioPortConfig(&patch.sources[0]);
@@ -4329,7 +4669,7 @@
             if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
                 index = mAudioPatches.indexOfKey(*patchHandle);
             } else {
-                index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+                index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
             }
             sp< AudioPatch> patchDesc;
             audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
@@ -4352,11 +4692,10 @@
                     patchDesc->mPatch = patch;
                 }
                 patchDesc->mAfPatchHandle = afPatchHandle;
-                patchDesc->mUid = mUidCached;
                 if (patchHandle) {
                     *patchHandle = patchDesc->mHandle;
                 }
-                outputDesc->mPatchHandle = patchDesc->mHandle;
+                outputDesc->setPatchHandle(patchDesc->mHandle);
                 nextAudioPortGeneration();
                 mpClientInterface->onAudioPatchListUpdate();
             }
@@ -4391,7 +4730,7 @@
     if (patchHandle) {
         index = mAudioPatches.indexOfKey(*patchHandle);
     } else {
-        index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+        index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
     }
     if (index < 0) {
         return INVALID_OPERATION;
@@ -4399,7 +4738,7 @@
     sp< AudioPatch> patchDesc = mAudioPatches.valueAt(index);
     status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, delayMs);
     ALOGV("resetOutputDevice() releaseAudioPatch returned %d", status);
-    outputDesc->mPatchHandle = 0;
+    outputDesc->setPatchHandle(AUDIO_PATCH_HANDLE_NONE);
     removeAudioPatch(patchDesc->mHandle);
     nextAudioPortGeneration();
     mpClientInterface->onAudioPatchListUpdate();
@@ -4424,7 +4763,7 @@
             // AUDIO_SOURCE_HOTWORD is for internal use only:
             // handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
             if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD &&
-                    !inputDesc->mIsSoundTrigger) {
+                    !inputDesc->isSoundTrigger()) {
                 patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
             }
             patch.num_sinks = 1;
@@ -4435,7 +4774,7 @@
             if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
                 index = mAudioPatches.indexOfKey(*patchHandle);
             } else {
-                index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+                index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
             }
             sp< AudioPatch> patchDesc;
             audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
@@ -4457,11 +4796,10 @@
                     patchDesc->mPatch = patch;
                 }
                 patchDesc->mAfPatchHandle = afPatchHandle;
-                patchDesc->mUid = mUidCached;
                 if (patchHandle) {
                     *patchHandle = patchDesc->mHandle;
                 }
-                inputDesc->mPatchHandle = patchDesc->mHandle;
+                inputDesc->setPatchHandle(patchDesc->mHandle);
                 nextAudioPortGeneration();
                 mpClientInterface->onAudioPatchListUpdate();
             }
@@ -4478,7 +4816,7 @@
     if (patchHandle) {
         index = mAudioPatches.indexOfKey(*patchHandle);
     } else {
-        index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+        index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     }
     if (index < 0) {
         return INVALID_OPERATION;
@@ -4486,7 +4824,7 @@
     sp< AudioPatch> patchDesc = mAudioPatches.valueAt(index);
     status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
     ALOGV("resetInputDevice() releaseAudioPatch returned %d", status);
-    inputDesc->mPatchHandle = 0;
+    inputDesc->setPatchHandle(AUDIO_PATCH_HANDLE_NONE);
     removeAudioPatch(patchDesc->mHandle);
     nextAudioPortGeneration();
     mpClientInterface->onAudioPatchListUpdate();
@@ -4557,14 +4895,15 @@
 }
 
 float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
-                                            int index,
-                                            audio_devices_t device)
+                                        int index,
+                                        audio_devices_t device)
 {
-    float volumeDb = mEngine->volIndexToDb(Volume::getDeviceCategory(device), stream, index);
-
+    float volumeDB = mVolumeCurves->volIndexToDb(stream, Volume::getDeviceCategory(device), index);
     // if a headset is connected, apply the following rules to ring tones and notifications
     // to avoid sound level bursts in user's ears:
-    // - always attenuate ring tones and notifications volume by 6dB
+    // - always attenuate notifications volume by 6dB
+    // - attenuate ring tones volume by 6dB unless music is not playing and
+    //   the speaker is part of the selected devices
     // - if music is playing, always limit the volume to current music volume,
     // with a minimum threshold at -36dB so that notification is always perceived.
     const routing_strategy stream_strategy = getStrategy(stream);
@@ -4577,27 +4916,43 @@
                 || (stream == AUDIO_STREAM_SYSTEM)
                 || ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) &&
                     (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) &&
-            mStreams.canBeMuted(stream)) {
-        volumeDb += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
+            mVolumeCurves->canBeMuted(stream)) {
         // when the phone is ringing we must consider that music could have been paused just before
         // by the music application and behave as if music was active if the last music track was
         // just stopped
         if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
                 mLimitRingtoneVolume) {
+            volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
             audio_devices_t musicDevice = getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/);
             float musicVolDB = computeVolume(AUDIO_STREAM_MUSIC,
-                                 mStreams.valueFor(AUDIO_STREAM_MUSIC).getVolumeIndex(musicDevice),
-                               musicDevice);
+                                             mVolumeCurves->getVolumeIndex(AUDIO_STREAM_MUSIC,
+                                                                              musicDevice),
+                                             musicDevice);
             float minVolDB = (musicVolDB > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
                     musicVolDB : SONIFICATION_HEADSET_VOLUME_MIN_DB;
-            if (volumeDb > minVolDB) {
-                volumeDb = minVolDB;
+            if (volumeDB > minVolDB) {
+                volumeDB = minVolDB;
                 ALOGV("computeVolume limiting volume to %f musicVol %f", minVolDB, musicVolDB);
             }
+            if (device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
+                    AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES)) {
+                // on A2DP, also ensure notification volume is not too low compared to media when
+                // intended to be played
+                if ((volumeDB > -96.0f) &&
+                        (musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB > volumeDB)) {
+                    ALOGV("computeVolume increasing volume for stream=%d device=0x%X from %f to %f",
+                            stream, device,
+                            volumeDB, musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB);
+                    volumeDB = musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB;
+                }
+            }
+        } else if ((Volume::getDeviceForVolume(device) != AUDIO_DEVICE_OUT_SPEAKER) ||
+                stream_strategy != STRATEGY_SONIFICATION) {
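+            // No music playing: still attenuate, except for ring tones on the speaker.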
+            volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
         }
     }
 
-    return volumeDb;
+    return volumeDB;
 }
 
 status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
@@ -4639,12 +4994,12 @@
         float voiceVolume;
         // Force voice volume to max for bluetooth SCO as volume is managed by the headset
         if (stream == AUDIO_STREAM_VOICE_CALL) {
-            voiceVolume = (float)index/(float)mStreams.valueFor(stream).getVolumeIndexMax();
+            voiceVolume = (float)index/(float)mVolumeCurves->getVolumeIndexMax(stream);
         } else {
             voiceVolume = 1.0;
         }
 
-        if (voiceVolume != mLastVoiceVolume && outputDesc == mPrimaryOutput) {
+        if (voiceVolume != mLastVoiceVolume) {
             mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
             mLastVoiceVolume = voiceVolume;
         }
@@ -4660,12 +5015,9 @@
 {
     ALOGVV("applyStreamVolumes() for device %08x", device);
 
-    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-        if (stream == AUDIO_STREAM_PATCH) {
-            continue;
-        }
+    for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
         checkAndSetVolume((audio_stream_type_t)stream,
-                          mStreams.valueFor((audio_stream_type_t)stream).getVolumeIndex(device),
+                          mVolumeCurves->getVolumeIndex((audio_stream_type_t)stream, device),
                           outputDesc,
                           device,
                           delayMs,
@@ -4681,10 +5033,7 @@
 {
     ALOGVV("setStrategyMute() strategy %d, mute %d, output ID %d",
            strategy, on, outputDesc->getId());
-    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-        if (stream == AUDIO_STREAM_PATCH) {
-            continue;
-        }
+    for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
         if (getStrategy((audio_stream_type_t)stream) == strategy) {
             setStreamMute((audio_stream_type_t)stream, on, outputDesc, delayMs, device);
         }
@@ -4697,7 +5046,6 @@
                                            int delayMs,
                                            audio_devices_t device)
 {
-    const StreamDescriptor& streamDesc = mStreams.valueFor(stream);
     if (device == AUDIO_DEVICE_NONE) {
         device = outputDesc->device();
     }
@@ -4707,7 +5055,7 @@
 
     if (on) {
         if (outputDesc->mMuteCount[stream] == 0) {
-            if (streamDesc.canBeMuted() &&
+            if (mVolumeCurves->canBeMuted(stream) &&
                     ((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) ||
                      (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) {
                 checkAndSetVolume(stream, 0, outputDesc, device, delayMs);
@@ -4722,7 +5070,7 @@
         }
         if (--outputDesc->mMuteCount[stream] == 0) {
             checkAndSetVolume(stream,
-                              streamDesc.getVolumeIndex(device),
+                              mVolumeCurves->getVolumeIndex(stream, device),
                               outputDesc,
                               device,
                               delayMs);
@@ -4779,39 +5127,6 @@
     }
 }
 
-
-
-void AudioPolicyManager::defaultAudioPolicyConfig(void)
-{
-    sp<HwModule> module;
-    sp<IOProfile> profile;
-    sp<DeviceDescriptor> defaultInputDevice =
-                    new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
-    mAvailableOutputDevices.add(mDefaultOutputDevice);
-    mAvailableInputDevices.add(defaultInputDevice);
-
-    module = new HwModule("primary");
-
-    profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SOURCE);
-    profile->attach(module);
-    profile->mSamplingRates.add(44100);
-    profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
-    profile->mChannelMasks.add(AUDIO_CHANNEL_OUT_STEREO);
-    profile->mSupportedDevices.add(mDefaultOutputDevice);
-    profile->mFlags = AUDIO_OUTPUT_FLAG_PRIMARY;
-    module->mOutputProfiles.add(profile);
-
-    profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SINK);
-    profile->attach(module);
-    profile->mSamplingRates.add(8000);
-    profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
-    profile->mChannelMasks.add(AUDIO_CHANNEL_IN_MONO);
-    profile->mSupportedDevices.add(defaultInputDevice);
-    module->mInputProfiles.add(profile);
-
-    mHwModules.add(module);
-}
-
 audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
 {
     // flags to stream type mapping
@@ -4832,15 +5147,6 @@
     case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
         return AUDIO_STREAM_MUSIC;
     case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        if (isStreamActive(AUDIO_STREAM_ALARM)) {
-            return AUDIO_STREAM_ALARM;
-        }
-        if (isStreamActive(AUDIO_STREAM_RING)) {
-            return AUDIO_STREAM_RING;
-        }
-        if (isInCall()) {
-            return AUDIO_STREAM_VOICE_CALL;
-        }
         return AUDIO_STREAM_ACCESSIBILITY;
     case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
         return AUDIO_STREAM_SYSTEM;
@@ -4907,10 +5213,7 @@
     if ((sysTime == 0) && (inPastMs != 0)) {
         sysTime = systemTime();
     }
-    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
-        if (i == AUDIO_STREAM_PATCH) {
-            continue;
-        }
+    for (int i = 0; i < (int)AUDIO_STREAM_FOR_POLICY_CNT; i++) {
         if (((getStrategy((audio_stream_type_t)i) == strategy) ||
                 (NUM_STRATEGIES == strategy)) &&
                 outputDesc->isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
@@ -4935,4 +5238,221 @@
     return is_state_in_call(state);
 }
 
+void AudioPolicyManager::cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc)
+{
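+    // Release every software audio source and audio patch that references the
+    // device being disconnected.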
+    for (ssize_t i = (ssize_t)mAudioSources.size() - 1; i >= 0; i--)  {
+        sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueAt(i);
+        if (sourceDesc->mDevice->equals(deviceDesc)) {
+            ALOGV("%s releasing audio source %d", __FUNCTION__, sourceDesc->getHandle());
+            stopAudioSource(sourceDesc->getHandle());
+        }
+    }
+
+    for (ssize_t i = (ssize_t)mAudioPatches.size() - 1; i >= 0; i--)  {
+        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(i);
+        bool release = false;
+        for (size_t j = 0; j < patchDesc->mPatch.num_sources && !release; j++)  {
+            const struct audio_port_config *source = &patchDesc->mPatch.sources[j];
+            if (source->type == AUDIO_PORT_TYPE_DEVICE &&
+                    source->ext.device.type == deviceDesc->type()) {
+                release = true;
+            }
+        }
+        for (size_t j = 0; j < patchDesc->mPatch.num_sinks && !release; j++)  {
+            const struct audio_port_config *sink = &patchDesc->mPatch.sinks[j];
+            if (sink->type == AUDIO_PORT_TYPE_DEVICE &&
+                    sink->ext.device.type == deviceDesc->type()) {
+                release = true;
+            }
+        }
+        if (release) {
+            ALOGV("%s releasing patch %u", __FUNCTION__, patchDesc->mHandle);
+            releaseAudioPatch(patchDesc->mHandle, patchDesc->mUid);
+        }
+    }
+}
+
+// Modify the list of surround sound formats supported.
+void AudioPolicyManager::filterSurroundFormats(FormatVector *formatsPtr) {
+    FormatVector &formats = *formatsPtr;
+    // TODO Set this based on Config properties.
+    const bool alwaysForceAC3 = true;
+
+    audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
+    ALOGD("%s: forced use = %d", __FUNCTION__, forceUse);
+
+    // Analyze original support for various formats.
+    bool supportsAC3 = false;
+    bool supportsOtherSurround = false;
+    bool supportsIEC61937 = false;
+    for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
+        audio_format_t format = formats[formatIndex];
+        switch (format) {
+            case AUDIO_FORMAT_AC3:
+                supportsAC3 = true;
+                break;
+            case AUDIO_FORMAT_E_AC3:
+            case AUDIO_FORMAT_DTS:
+            case AUDIO_FORMAT_DTS_HD:
+                supportsOtherSurround = true;
+                break;
+            case AUDIO_FORMAT_IEC61937:
+                supportsIEC61937 = true;
+                break;
+            default:
+                break;
+        }
+    }
+
+    // Modify formats based on surround preferences.
+    // If NEVER, remove support for surround formats.
+    if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
+        if (supportsAC3 || supportsOtherSurround || supportsIEC61937) {
+            // Remove surround sound related formats.
+            for (size_t formatIndex = 0; formatIndex < formats.size(); ) {
+                audio_format_t format = formats[formatIndex];
+                switch(format) {
+                    case AUDIO_FORMAT_AC3:
+                    case AUDIO_FORMAT_E_AC3:
+                    case AUDIO_FORMAT_DTS:
+                    case AUDIO_FORMAT_DTS_HD:
+                    case AUDIO_FORMAT_IEC61937:
+                        formats.removeAt(formatIndex);
+                        break;
+                    default:
+                        formatIndex++; // keep it
+                        break;
+                }
+            }
+            supportsAC3 = false;
+            supportsOtherSurround = false;
+            supportsIEC61937 = false;
+        }
+    } else { // AUTO or ALWAYS
+        // Most TVs support AC3 even if they do not report it in the EDID.
+        if ((alwaysForceAC3 || (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS))
+                && !supportsAC3) {
+            formats.add(AUDIO_FORMAT_AC3);
+            supportsAC3 = true;
+        }
+
+        // If ALWAYS, add support for raw surround formats if all are missing.
+        // This assumes that if any of these formats are reported by the HAL
+        // then the report is valid and should not be modified.
+        if ((forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS)
+                && !supportsOtherSurround) {
+            formats.add(AUDIO_FORMAT_E_AC3);
+            formats.add(AUDIO_FORMAT_DTS);
+            formats.add(AUDIO_FORMAT_DTS_HD);
+            supportsOtherSurround = true;
+        }
+
+        // Add support for IEC61937 if any raw surround supported.
+    // The HAL could do this itself, but add it here just in case.
+        if ((supportsAC3 || supportsOtherSurround) && !supportsIEC61937) {
+            formats.add(AUDIO_FORMAT_IEC61937);
+            supportsIEC61937 = true;
+        }
+    }
+}
+
+// Modify the list of channel masks supported.
+void AudioPolicyManager::filterSurroundChannelMasks(ChannelsVector *channelMasksPtr) {
+    ChannelsVector &channelMasks = *channelMasksPtr;
+    audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
+
+    // If NEVER, then remove support for channelMasks > stereo.
+    if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
+        for (size_t maskIndex = 0; maskIndex < channelMasks.size(); ) {
+            audio_channel_mask_t channelMask = channelMasks[maskIndex];
+            if (channelMask & ~AUDIO_CHANNEL_OUT_STEREO) {
+                ALOGI("%s: force NEVER, so remove channelMask 0x%08x", __FUNCTION__, channelMask);
+                channelMasks.removeAt(maskIndex);
+            } else {
+                maskIndex++;
+            }
+        }
+    // If ALWAYS, then make sure we at least support 5.1
+    } else if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+        bool supports5dot1 = false;
+        // Are there any channel masks that can be considered "surround"?
+        for (size_t maskIndex = 0; maskIndex < channelMasks.size(); maskIndex++) {
+            audio_channel_mask_t channelMask = channelMasks[maskIndex];
+            if ((channelMask & AUDIO_CHANNEL_OUT_5POINT1) == AUDIO_CHANNEL_OUT_5POINT1) {
+                supports5dot1 = true;
+                break;
+            }
+        }
+        // If not then add 5.1 support.
+        if (!supports5dot1) {
+            channelMasks.add(AUDIO_CHANNEL_OUT_5POINT1);
+            ALOGI("%s: force ALWAYS, so adding channelMask for 5.1 surround", __FUNCTION__);
+        }
+    }
+}
+
+void AudioPolicyManager::updateAudioProfiles(audio_devices_t device,
+                                             audio_io_handle_t ioHandle,
+                                             AudioProfileVector &profiles)
+{
+    String8 reply;
+    char *value;
+
+    // Format MUST be checked first to update the list of AudioProfile
+    if (profiles.hasDynamicFormat()) {
+        reply = mpClientInterface->getParameters(ioHandle,
+                                                 String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
+        ALOGV("%s: supported formats %s", __FUNCTION__, reply.string());
+        AudioParameter repliedParameters(reply);
+        if (repliedParameters.get(
+                String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS), reply) != NO_ERROR) {
+            ALOGE("%s: failed to retrieve format, bailing out", __FUNCTION__);
+            return;
+        }
+        FormatVector formats = formatsFromString(reply.string());
+        if (device == AUDIO_DEVICE_OUT_HDMI) {
+            filterSurroundFormats(&formats);
+        }
+        profiles.setFormats(formats);
+    }
+    const FormatVector &supportedFormats = profiles.getSupportedFormats();
+
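+    // For each supported format, query the HAL for its dynamic sample rates and
+    // channel masks, and record the result as a new profile.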
+    for (size_t formatIndex = 0; formatIndex < supportedFormats.size(); formatIndex++) {
+        audio_format_t format = supportedFormats[formatIndex];
+        ChannelsVector channelMasks;
+        SampleRateVector samplingRates;
+        AudioParameter requestedParameters;
+        requestedParameters.addInt(String8(AUDIO_PARAMETER_STREAM_FORMAT), format);
+
+        if (profiles.hasDynamicRateFor(format)) {
+            reply = mpClientInterface->getParameters(ioHandle,
+                                                     requestedParameters.toString() + ";" +
+                                                     AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES);
+            ALOGV("%s: supported sampling rates %s", __FUNCTION__, reply.string());
+            AudioParameter repliedParameters(reply);
+            if (repliedParameters.get(
+                    String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES), reply) == NO_ERROR) {
+                samplingRates = samplingRatesFromString(reply.string());
+            }
+        }
+        if (profiles.hasDynamicChannelsFor(format)) {
+            reply = mpClientInterface->getParameters(ioHandle,
+                                                     requestedParameters.toString() + ";" +
+                                                     AUDIO_PARAMETER_STREAM_SUP_CHANNELS);
+            ALOGV("%s: supported channel masks %s", __FUNCTION__, reply.string());
+            AudioParameter repliedParameters(reply);
+            if (repliedParameters.get(
+                    String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS), reply) == NO_ERROR) {
+                channelMasks = channelMasksFromString(reply.string());
+                if (device == AUDIO_DEVICE_OUT_HDMI) {
+                    filterSurroundChannelMasks(&channelMasks);
+                }
+            }
+        }
+        profiles.addProfileFromHal(new AudioProfile(format, channelMasks, samplingRates));
+    }
+}
+
 }; // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index bbdf396..6c3e416 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -24,6 +24,7 @@
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/SortedVector.h>
+#include <media/AudioParameter.h>
 #include <media/AudioPolicy.h>
 #include "AudioPolicyInterface.h"
 
@@ -32,7 +33,6 @@
 #include <AudioGain.h>
 #include <AudioPort.h>
 #include <AudioPatch.h>
-#include <ConfigParsingUtils.h>
 #include <DeviceDescriptor.h>
 #include <IOProfile.h>
 #include <HwModule.h>
@@ -41,19 +41,19 @@
 #include <AudioPolicyMix.h>
 #include <EffectDescriptor.h>
 #include <SoundTriggerSession.h>
-#include <StreamDescriptor.h>
 #include <SessionRoute.h>
+#include <VolumeCurve.h>
 
 namespace android {
 
 // ----------------------------------------------------------------------------
 
 // Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB
-#define SONIFICATION_HEADSET_VOLUME_FACTOR 0.5
 #define SONIFICATION_HEADSET_VOLUME_FACTOR_DB (-6)
 // Min volume for STRATEGY_SONIFICATION streams when limited by music volume: -36dB
-#define SONIFICATION_HEADSET_VOLUME_MIN  0.016
 #define SONIFICATION_HEADSET_VOLUME_MIN_DB  (-36)
+// Max volume difference on A2DP between playing media and STRATEGY_SONIFICATION streams: 12dB
+#define SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB (12)
 
 // Time in milliseconds during which we consider that music is still active after a music
 // track was stopped - see computeVolume()
@@ -211,6 +211,8 @@
                                           unsigned int *generation);
         virtual status_t setAudioPortConfig(const struct audio_port_config *config);
 
+        virtual void releaseResourcesForUid(uid_t uid);
+
         virtual status_t acquireSoundTriggerSession(audio_session_t *session,
                                                audio_io_handle_t *ioHandle,
                                                audio_devices_t *device);
@@ -225,14 +227,12 @@
 
         virtual status_t startAudioSource(const struct audio_port_config *source,
                                           const audio_attributes_t *attributes,
-                                          audio_io_handle_t *handle);
+                                          audio_io_handle_t *handle,
+                                          uid_t uid);
         virtual status_t stopAudioSource(audio_io_handle_t handle);
 
-        virtual void     releaseResourcesForUid(uid_t uid);
-
-        // Audio policy configuration file parsing (audio_policy.conf)
-        // TODO candidates to be moved to ConfigParsingUtils
-                void defaultAudioPolicyConfig(void);
+        virtual status_t setMasterMono(bool mono);
+        virtual status_t getMasterMono(bool *mono);
 
         // return the strategy corresponding to a given stream type
         routing_strategy getStrategy(audio_stream_type_t stream) const;
@@ -267,10 +267,7 @@
         {
             return mAvailableInputDevices;
         }
-        virtual StreamDescriptorCollection &getStreamDescriptors()
-        {
-            return mStreams;
-        }
+        virtual IVolumeCurvesCollection &getVolumeCurves() { return *mVolumeCurves; }
         virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const
         {
             return mDefaultOutputDevice;
@@ -487,6 +484,7 @@
         status_t startSource(sp<AudioOutputDescriptor> outputDesc,
                              audio_stream_type_t stream,
                              audio_devices_t device,
+                             const char *address,
                              uint32_t *delayMs);
         status_t stopSource(sp<AudioOutputDescriptor> outputDesc,
                             audio_stream_type_t stream,
@@ -498,6 +496,20 @@
 
         status_t hasPrimaryOutput() const { return mPrimaryOutput != 0; }
 
+        status_t connectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc);
+        status_t disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc);
+
+        sp<AudioSourceDescriptor> getSourceForStrategyOnOutput(audio_io_handle_t output,
+                                                               routing_strategy strategy);
+
+        void cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc);
+
+        void clearAudioSources(uid_t uid);
+
+
+        static bool streamsMatchForvolume(audio_stream_type_t stream1,
+                                          audio_stream_type_t stream2);
+
         uid_t mUidCached;
         AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
         sp<SwAudioOutputDescriptor> mPrimaryOutput;     // primary output descriptor
@@ -515,7 +527,8 @@
         SessionRouteMap mOutputRoutes = SessionRouteMap(SessionRouteMap::MAPTYPE_OUTPUT);
         SessionRouteMap mInputRoutes = SessionRouteMap(SessionRouteMap::MAPTYPE_INPUT);
 
-        StreamDescriptorCollection mStreams; // stream descriptors for volume control
+        IVolumeCurvesCollection *mVolumeCurves; // Volume Curves per use case and device category
+
         bool    mLimitRingtoneVolume;        // limit ringtone volume to music volume if headset connected
         audio_devices_t mDeviceForStrategy[NUM_STRATEGIES];
         float   mLastVoiceVolume;            // last voice volume value sent to audio HAL
@@ -523,9 +536,6 @@
         EffectDescriptorCollection mEffects;  // list of registered audio effects
         bool    mA2dpSuspended;  // true if A2DP output is suspended
         sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
-        bool mSpeakerDrcEnabled;// true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER path
-                                // to boost soft sounds, used to adjust volume curves accordingly
-
         HwModuleCollection mHwModules;
 
         volatile int32_t mAudioPortGeneration;
@@ -537,6 +547,9 @@
         sp<AudioPatch> mCallTxPatch;
         sp<AudioPatch> mCallRxPatch;
 
+        HwAudioOutputCollection mHwOutputs;
+        AudioSourceCollection mAudioSources;
+
         // for supporting "beacon" streams, i.e. streams that only play on speaker, and never
         // when something other than STREAM_TTS (a.k.a. "Transmitted Through Speaker") is playing
         enum {
@@ -550,6 +563,7 @@
         bool mBeaconMuted;              // has STREAM_TTS been muted
         bool mTtsOutputAvailable;       // true if a dedicated output for TTS stream is available
 
+        bool mMasterMono;               // true if we wish to force all outputs to mono
         AudioPolicyMixCollection mPolicyMixes; // list of registered mixes
 
 #ifdef AUDIO_POLICY_TEST
@@ -572,6 +586,14 @@
         // Audio Policy Engine Interface.
         AudioPolicyManagerInterface *mEngine;
 private:
+        // Add or remove AC3/DTS encodings based on user preferences.
+        void filterSurroundFormats(FormatVector *formatsPtr);
+        void filterSurroundChannelMasks(ChannelsVector *channelMasksPtr);
+
+        // If any, resolve any "dynamic" fields of an Audio Profiles collection
+        void updateAudioProfiles(audio_devices_t device, audio_io_handle_t ioHandle,
+                AudioProfileVector &profiles);
+
         // updates device caching and output for streams that can influence the
         //    routing of notifications
         void handleNotificationRoutingForStream(audio_stream_type_t stream);
@@ -595,6 +617,18 @@
                 audio_channel_mask_t channelMask,
                 audio_output_flags_t flags,
                 const audio_offload_info_t *offloadInfo);
+        // internal method to return the input handle for the given device and format
+        audio_io_handle_t getInputForDevice(audio_devices_t device,
+                String8 address,
+                audio_session_t session,
+                uid_t uid,
+                audio_source_t inputSource,
+                uint32_t samplingRate,
+                audio_format_t format,
+                audio_channel_mask_t channelMask,
+                audio_input_flags_t flags,
+                AudioMix *policyMix);
+
         // internal function to derive a stream type value from audio attributes
         audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
         // event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
@@ -614,6 +648,11 @@
                                                           audio_policy_dev_state_t state,
                                                           const char *device_address,
                                                           const char *device_name);
+        void updateMono(audio_io_handle_t output) {
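+            // Push the current master mono setting to the output stream as a
+            // key/value parameter.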
+            AudioParameter param;
+            param.addInt(String8(AUDIO_PARAMETER_MONO_OUTPUT), (int)mMasterMono);
+            mpClientInterface->setParameters(output, param.toString());
+        }
 };
 
 };
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 489a9be..dbcc070 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -30,7 +30,7 @@
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
         ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
+        return AUDIO_MODULE_HANDLE_NONE;
     }
 
     return af->loadHwModule(name);
@@ -171,7 +171,7 @@
     return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
 }
 
-status_t AudioPolicyService::AudioPolicyClient::moveEffects(int session,
+status_t AudioPolicyService::AudioPolicyClient::moveEffects(audio_session_t session,
                         audio_io_handle_t src_output,
                         audio_io_handle_t dst_output)
 {
@@ -219,9 +219,18 @@
     mAudioPolicyService->onDynamicPolicyMixStateUpdate(regId, state);
 }
 
-audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId()
+void AudioPolicyService::AudioPolicyClient::onRecordingConfigurationUpdate(
+        int event, audio_session_t session, audio_source_t source,
+        const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+        audio_patch_handle_t patchHandle)
 {
-    return AudioSystem::newAudioUniqueId();
+    mAudioPolicyService->onRecordingConfigurationUpdate(event, session, source,
+            clientConfig, deviceConfig, patchHandle);
+}
+
+audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId(audio_unique_id_use_t use)
+{
+    return AudioSystem::newAudioUniqueId(use);
 }
 
 }; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
index a79f8ae..151d066 100644
--- a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
@@ -111,7 +111,7 @@
                                          uint32_t *pLatencyMs,
                                          audio_output_flags_t flags)
 {
-    return open_output((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
+    return open_output(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask,
                           pLatencyMs, flags, NULL);
 }
 
@@ -190,7 +190,8 @@
     }
 
     if (((*pDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) == AUDIO_DEVICE_IN_REMOTE_SUBMIX)
-            && !captureAudioOutputAllowed()) {
+            && !captureAudioOutputAllowed(IPCThreadState::self()->getCallingPid(),
+                                          IPCThreadState::self()->getCallingUid())) {
         ALOGE("open_input() permission denied: capture not allowed");
         return AUDIO_IO_HANDLE_NONE;
     }
@@ -219,7 +220,7 @@
                                         audio_channel_mask_t *pChannelMask,
                                         audio_in_acoustics_t acoustics __unused)
 {
-    return  open_input((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
+    return  open_input(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask);
 }
 
 audio_io_handle_t aps_open_input_on_module(void *service __unused,
@@ -252,7 +253,7 @@
     return af->invalidateStream(stream);
 }
 
-int aps_move_effects(void *service __unused, int session,
+int aps_move_effects(void *service __unused, audio_session_t session,
                                 audio_io_handle_t src_output,
                                 audio_io_handle_t dst_output)
 {
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 282ddeb..b732b20 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -79,7 +79,7 @@
 
 status_t AudioPolicyEffects::addInputEffects(audio_io_handle_t input,
                              audio_source_t inputSource,
-                             int audioSession)
+                             audio_session_t audioSession)
 {
     status_t status = NO_ERROR;
 
@@ -152,7 +152,7 @@
     return status;
 }
 
-status_t AudioPolicyEffects::queryDefaultInputEffects(int audioSession,
+status_t AudioPolicyEffects::queryDefaultInputEffects(audio_session_t audioSession,
                                                       effect_descriptor_t *descriptors,
                                                       uint32_t *count)
 {
@@ -185,7 +185,7 @@
 }
 
 
-status_t AudioPolicyEffects::queryDefaultOutputSessionEffects(int audioSession,
+status_t AudioPolicyEffects::queryDefaultOutputSessionEffects(audio_session_t audioSession,
                          effect_descriptor_t *descriptors,
                          uint32_t *count)
 {
@@ -220,7 +220,7 @@
 
 status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output,
                          audio_stream_type_t stream,
-                         int audioSession)
+                         audio_session_t audioSession)
 {
     status_t status = NO_ERROR;
 
@@ -275,7 +275,7 @@
 
 status_t AudioPolicyEffects::releaseOutputSessionEffects(audio_io_handle_t output,
                          audio_stream_type_t stream,
-                         int audioSession)
+                         audio_session_t audioSession)
 {
     status_t status = NO_ERROR;
     (void) output; // argument not used for now
@@ -323,7 +323,8 @@
     VOICE_CALL_SRC_TAG,
     CAMCORDER_SRC_TAG,
     VOICE_REC_SRC_TAG,
-    VOICE_COMM_SRC_TAG
+    VOICE_COMM_SRC_TAG,
+    UNPROCESSED_SRC_TAG
 };
 
 // returns the audio_source_t enum corresponding to the input source name or
@@ -372,7 +373,7 @@
 // Audio Effect Config parser
 // ----------------------------------------------------------------------------
 
-size_t AudioPolicyEffects::growParamSize(char *param,
+size_t AudioPolicyEffects::growParamSize(char **param,
                                          size_t size,
                                          size_t *curSize,
                                          size_t *totSize)
@@ -384,55 +385,82 @@
         while (pos + size > *totSize) {
             *totSize += ((*totSize + 7) / 8) * 4;
         }
-        param = (char *)realloc(param, *totSize);
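+        // realloc() may move the buffer, so the new address is written back through
+        // the caller's pointer rather than into a local copy.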
+        *param = (char *)realloc(*param, *totSize);
+        if (*param == NULL) {
+            ALOGE("%s realloc error for size %zu", __func__, *totSize);
+            return 0;
+        }
     }
     *curSize = pos + size;
     return pos;
 }
 
+
 size_t AudioPolicyEffects::readParamValue(cnode *node,
-                                          char *param,
+                                          char **param,
                                           size_t *curSize,
                                           size_t *totSize)
 {
+    size_t len = 0;
+    size_t pos;
+
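+    // A return value of 0 signals a parse or allocation failure to the caller.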
     if (strncmp(node->name, SHORT_TAG, sizeof(SHORT_TAG) + 1) == 0) {
-        size_t pos = growParamSize(param, sizeof(short), curSize, totSize);
-        *(short *)((char *)param + pos) = (short)atoi(node->value);
-        ALOGV("readParamValue() reading short %d", *(short *)((char *)param + pos));
-        return sizeof(short);
-    } else if (strncmp(node->name, INT_TAG, sizeof(INT_TAG) + 1) == 0) {
-        size_t pos = growParamSize(param, sizeof(int), curSize, totSize);
-        *(int *)((char *)param + pos) = atoi(node->value);
-        ALOGV("readParamValue() reading int %d", *(int *)((char *)param + pos));
-        return sizeof(int);
-    } else if (strncmp(node->name, FLOAT_TAG, sizeof(FLOAT_TAG) + 1) == 0) {
-        size_t pos = growParamSize(param, sizeof(float), curSize, totSize);
-        *(float *)((char *)param + pos) = (float)atof(node->value);
-        ALOGV("readParamValue() reading float %f",*(float *)((char *)param + pos));
-        return sizeof(float);
-    } else if (strncmp(node->name, BOOL_TAG, sizeof(BOOL_TAG) + 1) == 0) {
-        size_t pos = growParamSize(param, sizeof(bool), curSize, totSize);
-        if (strncmp(node->value, "false", strlen("false") + 1) == 0) {
-            *(bool *)((char *)param + pos) = false;
-        } else {
-            *(bool *)((char *)param + pos) = true;
+        pos = growParamSize(param, sizeof(short), curSize, totSize);
+        if (pos == 0) {
+            goto exit;
         }
-        ALOGV("readParamValue() reading bool %s",*(bool *)((char *)param + pos) ? "true" : "false");
-        return sizeof(bool);
+        *(short *)(*param + pos) = (short)atoi(node->value);
+        ALOGV("readParamValue() reading short %d", *(short *)(*param + pos));
+        len = sizeof(short);
+    } else if (strncmp(node->name, INT_TAG, sizeof(INT_TAG) + 1) == 0) {
+        pos = growParamSize(param, sizeof(int), curSize, totSize);
+        if (pos == 0) {
+            goto exit;
+        }
+        *(int *)(*param + pos) = atoi(node->value);
+        ALOGV("readParamValue() reading int %d", *(int *)(*param + pos));
+        len = sizeof(int);
+    } else if (strncmp(node->name, FLOAT_TAG, sizeof(FLOAT_TAG) + 1) == 0) {
+        pos = growParamSize(param, sizeof(float), curSize, totSize);
+        if (pos == 0) {
+            goto exit;
+        }
+        *(float *)(*param + pos) = (float)atof(node->value);
+        ALOGV("readParamValue() reading float %f",*(float *)(*param + pos));
+        len = sizeof(float);
+    } else if (strncmp(node->name, BOOL_TAG, sizeof(BOOL_TAG) + 1) == 0) {
+        pos = growParamSize(param, sizeof(bool), curSize, totSize);
+        if (pos == 0) {
+            goto exit;
+        }
+        if (strncmp(node->value, "true", strlen("true") + 1) == 0) {
+            *(bool *)(*param + pos) = true;
+        } else {
+            *(bool *)(*param + pos) = false;
+        }
+        ALOGV("readParamValue() reading bool %s",
+              *(bool *)(*param + pos) ? "true" : "false");
+        len = sizeof(bool);
     } else if (strncmp(node->name, STRING_TAG, sizeof(STRING_TAG) + 1) == 0) {
-        size_t len = strnlen(node->value, EFFECT_STRING_LEN_MAX);
+        len = strnlen(node->value, EFFECT_STRING_LEN_MAX);
         if (*curSize + len + 1 > *totSize) {
             *totSize = *curSize + len + 1;
-            param = (char *)realloc(param, *totSize);
+            *param = (char *)realloc(*param, *totSize);
+            if (*param == NULL) {
+                len = 0;
+                ALOGE("%s realloc error for string len %zu", __func__, *totSize);
+                goto exit;
+            }
         }
-        strncpy(param + *curSize, node->value, len);
+        strncpy(*param + *curSize, node->value, len);
         *curSize += len;
-        param[*curSize] = '\0';
-        ALOGV("readParamValue() reading string %s", param + *curSize - len);
-        return len;
+        (*param)[*curSize] = '\0';
+        ALOGV("readParamValue() reading string %s", *param + *curSize - len);
+    } else {
+        ALOGW("readParamValue() unknown param type %s", node->name);
     }
-    ALOGW("readParamValue() unknown param type %s", node->name);
-    return 0;
+exit:
+    return len;
 }
 
 effect_param_t *AudioPolicyEffects::loadEffectParameter(cnode *root)
@@ -443,6 +471,12 @@
     size_t totSize = sizeof(effect_param_t) + 2 * sizeof(int);
     effect_param_t *fx_param = (effect_param_t *)malloc(totSize);
 
+    if (fx_param == NULL) {
+        ALOGE("%s malloc error for effect structure of size %zu",
+              __func__, totSize);
+        return NULL;
+    }
+
     param = config_find(root, PARAM_TAG);
     value = config_find(root, VALUE_TAG);
     if (param == NULL && value == NULL) {
@@ -451,8 +485,10 @@
         if (param != NULL) {
             // Note that a pair of random strings is read as 0 0
             int *ptr = (int *)fx_param->data;
+#if LOG_NDEBUG == 0
             int *ptr2 = (int *)((char *)param + sizeof(effect_param_t));
-            ALOGW("loadEffectParameter() ptr %p ptr2 %p", ptr, ptr2);
+            ALOGV("loadEffectParameter() ptr %p ptr2 %p", ptr, ptr2);
+#endif
             *ptr++ = atoi(param->name);
             *ptr = atoi(param->value);
             fx_param->psize = sizeof(int);
@@ -461,7 +497,8 @@
         }
     }
     if (param == NULL || value == NULL) {
-        ALOGW("loadEffectParameter() invalid parameter description %s", root->name);
+        ALOGW("loadEffectParameter() invalid parameter description %s",
+              root->name);
         goto error;
     }
 
@@ -469,7 +506,8 @@
     param = param->first_child;
     while (param) {
         ALOGV("loadEffectParameter() reading param of type %s", param->name);
-        size_t size = readParamValue(param, (char *)fx_param, &curSize, &totSize);
+        size_t size =
+                readParamValue(param, (char **)&fx_param, &curSize, &totSize);
         if (size == 0) {
             goto error;
         }
@@ -484,7 +522,8 @@
     value = value->first_child;
     while (value) {
         ALOGV("loadEffectParameter() reading value of type %s", value->name);
-        size_t size = readParamValue(value, (char *)fx_param, &curSize, &totSize);
+        size_t size =
+                readParamValue(value, (char **)&fx_param, &curSize, &totSize);
         if (size == 0) {
             goto error;
         }
@@ -495,7 +534,7 @@
     return fx_param;
 
 error:
-    delete fx_param;
+    free(fx_param);
     return NULL;
 }
 
@@ -505,11 +544,9 @@
     while (node) {
         ALOGV("loadEffectParameters() loading param %s", node->name);
         effect_param_t *param = loadEffectParameter(node);
-        if (param == NULL) {
-            node = node->next;
-            continue;
+        if (param != NULL) {
+            params.add(param);
         }
-        params.add(param);
         node = node->next;
     }
 }
@@ -527,6 +564,7 @@
     EffectDescVector *desc = new EffectDescVector();
     while (node) {
         size_t i;
+
         for (i = 0; i < effects.size(); i++) {
             if (strncmp(effects[i]->mName, node->name, EFFECT_STRING_LEN_MAX) == 0) {
                 ALOGV("loadEffectConfig() found effect %s in list", node->name);
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index 3dec437..ee9bd50 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -51,7 +51,7 @@
 
     // Return a list of effect descriptors for default input effects
     // associated with audioSession
-    status_t queryDefaultInputEffects(int audioSession,
+    status_t queryDefaultInputEffects(audio_session_t audioSession,
                              effect_descriptor_t *descriptors,
                              uint32_t *count);
 
@@ -59,7 +59,7 @@
     // Effects are attached depending on the audio_source_t
     status_t addInputEffects(audio_io_handle_t input,
                              audio_source_t inputSource,
-                             int audioSession);
+                             audio_session_t audioSession);
 
     // Release all input effects associated with this input
     status_t releaseInputEffects(audio_io_handle_t input);
@@ -67,7 +67,7 @@
 
     // Return a list of effect descriptors for default output effects
     // associated with audioSession
-    status_t queryDefaultOutputSessionEffects(int audioSession,
+    status_t queryDefaultOutputSessionEffects(audio_session_t audioSession,
                              effect_descriptor_t *descriptors,
                              uint32_t *count);
 
@@ -75,12 +75,12 @@
     // Effects are attached depending on the audio_stream_type_t
     status_t addOutputSessionEffects(audio_io_handle_t output,
                              audio_stream_type_t stream,
-                             int audioSession);
+                             audio_session_t audioSession);
 
     // Release all output effects associated with this output stream and audio session
     status_t releaseOutputSessionEffects(audio_io_handle_t output,
                              audio_stream_type_t stream,
-                             int audioSession);
+                             audio_session_t audioSession);
 
 private:
 
@@ -135,13 +135,13 @@
     // class to store a vector of AudioEffects
     class EffectVector {
     public:
-        EffectVector(int session) : mSessionId(session), mRefCount(0) {}
+        EffectVector(audio_session_t session) : mSessionId(session), mRefCount(0) {}
         /*virtual*/ ~EffectVector() {}
 
         // Enable or disable all effects in effect vector
         void setProcessorEnabled(bool enabled);
 
-        const int mSessionId;
+        const audio_session_t mSessionId;
         // AudioPolicyManager keeps mLock, no need for lock on reference count here
         int mRefCount;
         Vector< sp<AudioEffect> >mEffects;
@@ -170,10 +170,10 @@
     void loadEffectParameters(cnode *root, Vector <effect_param_t *>& params);
     effect_param_t *loadEffectParameter(cnode *root);
     size_t readParamValue(cnode *node,
-                          char *param,
+                          char **param,
                           size_t *curSize,
                           size_t *totSize);
-    size_t growParamSize(char *param,
+    size_t growParamSize(char **param,
                          size_t size,
                          size_t *curSize,
                          size_t *totSize);
@@ -188,7 +188,7 @@
     // Automatic output effects are organized per audio_stream_type_t
     KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams;
     // Automatic output effects are unique for audiosession ID
-    KeyedVector< int32_t, EffectVector* > mOutputSessions;
+    KeyedVector< audio_session_t, EffectVector* > mOutputSessions;
 };
 
 }; // namespace android
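
The header-wide migration from int session IDs to audio_session_t is more than cosmetic:
a distinct enum type rejects implicit conversions from unrelated integers, so mixed-up
call sites fail at compile time. A standalone sketch of the effect (the real type is
defined by the system audio headers; the names below are stand-ins):

    // Stand-in for the real system type, for illustration only.
    enum audio_session_t : int { AUDIO_SESSION_NONE = 0 };

    static void releaseSession(audio_session_t /*session*/) {}  // hypothetical stub

    int main() {
        int raw = 42;
        // releaseSession(raw);  // error: no implicit int -> audio_session_t
        releaseSession(static_cast<audio_session_t>(raw));  // intent made explicit
        return 0;
    }
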
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index ca365a5..c9b3abc 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -164,13 +164,11 @@
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
 
-    // if the caller is us, trust the specified uid
-    if (IPCThreadState::self()->getCallingPid() != getpid_cached || uid == (uid_t)-1) {
-        uid_t newclientUid = IPCThreadState::self()->getCallingUid();
-        if (uid != (uid_t)-1 && uid != newclientUid) {
-            ALOGW("%s uid %d tried to pass itself off as %d", __FUNCTION__, newclientUid, uid);
-        }
-        uid = newclientUid;
+    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+    if (!isTrustedCallingUid(callingUid) || uid == (uid_t)-1) {
+        ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
+                "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
+        uid = callingUid;
     }
     return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, samplingRate,
                                     format, channelMask, flags, selectedDeviceId, offloadInfo);
@@ -262,6 +260,7 @@
 status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *input,
                                              audio_session_t session,
+                                             pid_t pid,
                                              uid_t uid,
                                              uint32_t samplingRate,
                                              audio_format_t format,
@@ -284,13 +283,22 @@
     sp<AudioPolicyEffects> audioPolicyEffects;
     status_t status;
     AudioPolicyInterface::input_type_t inputType;
-    // if the caller is us, trust the specified uid
-    if (IPCThreadState::self()->getCallingPid() != getpid_cached || uid == (uid_t)-1) {
-        uid_t newclientUid = IPCThreadState::self()->getCallingUid();
-        if (uid != (uid_t)-1 && uid != newclientUid) {
-            ALOGW("%s uid %d tried to pass itself off as %d", __FUNCTION__, newclientUid, uid);
-        }
-        uid = newclientUid;
+
+    bool updatePid = (pid == -1);
+    const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+    if (!isTrustedCallingUid(callingUid)) {
+        ALOGW_IF(uid != (uid_t)-1 && uid != callingUid,
+                "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
+        uid = callingUid;
+        updatePid = true;
+    }
+
+    if (updatePid) {
+        const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+        ALOGW_IF(pid != (pid_t)-1 && pid != callingPid,
+                 "%s uid %d pid %d tried to pass itself off as pid %d",
+                 __func__, callingUid, callingPid, pid);
+        pid = callingPid;
     }
 
     {
@@ -310,7 +318,7 @@
             case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
                 // FIXME: use the same permission as for remote submix for now.
             case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
-                if (!captureAudioOutputAllowed()) {
+                if (!captureAudioOutputAllowed(pid, uid)) {
                     ALOGE("getInputForAttr() permission denied: capture not allowed");
                     status = PERMISSION_DENIED;
                 }
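
The getInputForAttr() change above sanitizes caller-supplied identity: unless the calling
UID is trusted, the service substitutes the binder calling UID/PID, so untrusted clients
cannot impersonate another app when permissions such as captureAudioOutputAllowed() are
checked. A minimal sketch with hypothetical stand-ins for IPCThreadState and
isTrustedCallingUid():

    #include <unistd.h>
    #include <sys/types.h>

    static uid_t callingUid() { return getuid(); }  // stand-in for binder identity
    static pid_t callingPid() { return getpid(); }
    static bool isTrustedUid(uid_t uid) { return uid == 0 || uid == 1000; } // root/system

    static void sanitizeIdentity(uid_t *uid, pid_t *pid) {
        bool updatePid = (*pid == (pid_t)-1);
        if (!isTrustedUid(callingUid())) {
            *uid = callingUid();    // untrusted callers may not impersonate others
            updatePid = true;
        }
        if (updatePid) {
            *pid = callingPid();
        }
    }
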
@@ -463,6 +471,7 @@
     if (mAudioPolicyManager == NULL) {
         return AUDIO_DEVICE_NONE;
     }
+    Mutex::Autolock _l(mLock);
     return mAudioPolicyManager->getDevicesForStream(stream);
 }
 
@@ -479,12 +488,13 @@
 status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
                                 audio_io_handle_t io,
                                 uint32_t strategy,
-                                int session,
+                                audio_session_t session,
                                 int id)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
+    Mutex::Autolock _l(mEffectsLock);
     return mAudioPolicyManager->registerEffect(desc, io, strategy, session, id);
 }
 
@@ -493,6 +503,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
+    Mutex::Autolock _l(mEffectsLock);
     return mAudioPolicyManager->unregisterEffect(id);
 }
 
@@ -501,6 +512,7 @@
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
+    Mutex::Autolock _l(mEffectsLock);
     return mAudioPolicyManager->setEffectEnabled(id, enabled);
 }
 
@@ -537,7 +549,7 @@
     return mAudioPolicyManager->isSourceActive(source);
 }
 
-status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
+status_t AudioPolicyService::queryDefaultPreProcessing(audio_session_t audioSession,
                                                        effect_descriptor_t *descriptors,
                                                        uint32_t *count)
 {
@@ -563,7 +575,9 @@
         ALOGV("mAudioPolicyManager == NULL");
         return false;
     }
-
+    Mutex::Autolock _l(mLock);
+    Mutex::Autolock _le(mEffectsLock); // isOffloadSupported queries for non-offloadable effects
     return mAudioPolicyManager->isOffloadSupported(info);
 }
 
@@ -689,7 +703,8 @@
         return NO_INIT;
     }
 
-    return mAudioPolicyManager->startAudioSource(source, attributes, handle);
+    return mAudioPolicyManager->startAudioSource(source, attributes, handle,
+                                                 IPCThreadState::self()->getCallingUid());
 }
 
 status_t AudioPolicyService::stopAudioSource(audio_io_handle_t handle)
@@ -702,4 +717,25 @@
     return mAudioPolicyManager->stopAudioSource(handle);
 }
 
+status_t AudioPolicyService::setMasterMono(bool mono)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->setMasterMono(mono);
+}
+
+status_t AudioPolicyService::getMasterMono(bool *mono)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->getMasterMono(mono);
+}
+
 }; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
index 13af3ef..7c9315d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
@@ -234,6 +234,7 @@
 status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *input,
                                              audio_session_t session,
+                                             pid_t pid __unused,
                                              uid_t uid __unused,
                                              uint32_t samplingRate,
                                              audio_format_t format,
@@ -427,7 +428,7 @@
 status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
                                 audio_io_handle_t io,
                                 uint32_t strategy,
-                                int session,
+                                audio_session_t session,
                                 int id)
 {
     if (mpAudioPolicy == NULL) {
@@ -488,7 +489,7 @@
     return mpAudioPolicy->is_source_active(mpAudioPolicy, source);
 }
 
-status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
+status_t AudioPolicyService::queryDefaultPreProcessing(audio_session_t audioSession,
                                                        effect_descriptor_t *descriptors,
                                                        uint32_t *count)
 {
@@ -619,4 +620,14 @@
     return INVALID_OPERATION;
 }
 
+status_t AudioPolicyService::setMasterMono(bool mono)
+{
+    return INVALID_OPERATION;
+}
+
+status_t AudioPolicyService::getMasterMono(bool *mono)
+{
+    return INVALID_OPERATION;
+}
+
 }; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index c77cc45..a6cd50e 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -149,7 +149,10 @@
 // connects to AudioPolicyService.
 void AudioPolicyService::registerClient(const sp<IAudioPolicyServiceClient>& client)
 {
-
+    if (client == 0) {
+        ALOGW("%s got NULL client", __FUNCTION__);
+        return;
+    }
     Mutex::Autolock _l(mNotificationClientsLock);
 
     uid_t uid = IPCThreadState::self()->getCallingUid();
@@ -212,19 +215,6 @@
     mOutputCommandThread->updateAudioPatchListCommand();
 }
 
-status_t AudioPolicyService::clientCreateAudioPatch(const struct audio_patch *patch,
-                                                audio_patch_handle_t *handle,
-                                                int delayMs)
-{
-    return mAudioCommandThread->createAudioPatchCommand(patch, handle, delayMs);
-}
-
-status_t AudioPolicyService::clientReleaseAudioPatch(audio_patch_handle_t handle,
-                                                 int delayMs)
-{
-    return mAudioCommandThread->releaseAudioPatchCommand(handle, delayMs);
-}
-
 void AudioPolicyService::doOnAudioPatchListUpdate()
 {
     Mutex::Autolock _l(mNotificationClientsLock);
@@ -248,6 +238,38 @@
     }
 }
 
+void AudioPolicyService::onRecordingConfigurationUpdate(int event, audio_session_t session,
+        audio_source_t source, const audio_config_base_t *clientConfig,
+        const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle)
+{
+    mOutputCommandThread->recordingConfigurationUpdateCommand(event, session, source,
+            clientConfig, deviceConfig, patchHandle);
+}
+
+void AudioPolicyService::doOnRecordingConfigurationUpdate(int event, audio_session_t session,
+        audio_source_t source, const audio_config_base_t *clientConfig,
+        const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle)
+{
+    Mutex::Autolock _l(mNotificationClientsLock);
+    for (size_t i = 0; i < mNotificationClients.size(); i++) {
+        mNotificationClients.valueAt(i)->onRecordingConfigurationUpdate(event, session, source,
+                clientConfig, deviceConfig, patchHandle);
+    }
+}
+
+status_t AudioPolicyService::clientCreateAudioPatch(const struct audio_patch *patch,
+                                                audio_patch_handle_t *handle,
+                                                int delayMs)
+{
+    return mAudioCommandThread->createAudioPatchCommand(patch, handle, delayMs);
+}
+
+status_t AudioPolicyService::clientReleaseAudioPatch(audio_patch_handle_t handle,
+                                                 int delayMs)
+{
+    return mAudioCommandThread->releaseAudioPatchCommand(handle, delayMs);
+}
+
 status_t AudioPolicyService::clientSetAudioPortConfig(const struct audio_port_config *config,
                                                       int delayMs)
 {
@@ -293,7 +315,18 @@
         String8 regId, int32_t state)
 {
     if (mAudioPolicyServiceClient != 0) {
-            mAudioPolicyServiceClient->onDynamicPolicyMixStateUpdate(regId, state);
+        mAudioPolicyServiceClient->onDynamicPolicyMixStateUpdate(regId, state);
+    }
+}
+
+void AudioPolicyService::NotificationClient::onRecordingConfigurationUpdate(
+        int event, audio_session_t session, audio_source_t source,
+        const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+        audio_patch_handle_t patchHandle)
+{
+    if (mAudioPolicyServiceClient != 0) {
+        mAudioPolicyServiceClient->onRecordingConfigurationUpdate(event, session, source,
+                clientConfig, deviceConfig, patchHandle);
     }
 }
 
@@ -423,7 +456,7 @@
 
 bool AudioPolicyService::AudioCommandThread::threadLoop()
 {
-    nsecs_t waitTime = INT64_MAX;
+    nsecs_t waitTime = -1;
 
     mLock.lock();
     while (!exitPending())
@@ -555,7 +588,6 @@
                 case DYN_POLICY_MIX_STATE_UPDATE: {
                     DynPolicyMixStateUpdateData *data =
                             (DynPolicyMixStateUpdateData *)command->mParam.get();
-                    //###ALOGV("AudioCommandThread() processing dyn policy mix state update");
                     ALOGV("AudioCommandThread() processing dyn policy mix state update %s %d",
                             data->mRegId.string(), data->mState);
                     svc = mService.promote();
@@ -566,6 +598,20 @@
                     svc->doOnDynamicPolicyMixStateUpdate(data->mRegId, data->mState);
                     mLock.lock();
                     } break;
+                case RECORDING_CONFIGURATION_UPDATE: {
+                    RecordingConfigurationUpdateData *data =
+                            (RecordingConfigurationUpdateData *)command->mParam.get();
+                    ALOGV("AudioCommandThread() processing recording configuration update");
+                    svc = mService.promote();
+                    if (svc == 0) {
+                        break;
+                    }
+                    mLock.unlock();
+                    svc->doOnRecordingConfigurationUpdate(data->mEvent, data->mSession,
+                            data->mSource, &data->mClientConfig, &data->mDeviceConfig,
+                            data->mPatchHandle);
+                    mLock.lock();
+                    } break;
                 default:
                     ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
                 }
@@ -576,7 +622,7 @@
                         command->mCond.signal();
                     }
                 }
-                waitTime = INT64_MAX;
+                waitTime = -1;
                 // release mLock before releasing strong reference on the service as
                 // AudioPolicyService destructor calls AudioCommandThread::exit() which
                 // acquires mLock.
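
As the comment notes, the thread must drop its own lock before calling out (or before
dropping the last strong reference), since the callee can re-enter and try to take the
same lock. A generic sketch of that discipline, using illustrative std:: types rather
than the Android utils classes:

    #include <mutex>
    #include <functional>

    struct Dispatcher {
        std::mutex mLock;

        void process(const std::function<void()>& callback) {
            std::unique_lock<std::mutex> lk(mLock);
            // ... dequeue work under the lock ...
            lk.unlock();   // callee may call back in and take mLock
            callback();
            lk.lock();     // reacquire before touching shared state again
        }
    };
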
@@ -598,7 +644,11 @@
         // has a finite delay. So unless we are exiting it is safe to wait.
         if (!exitPending()) {
             ALOGV("AudioCommandThread() going to sleep");
-            mWaitWorkCV.waitRelative(mLock, waitTime);
+            if (waitTime == -1) {
+                mWaitWorkCV.wait(mLock);
+            } else {
+                mWaitWorkCV.waitRelative(mLock, waitTime);
+            }
         }
     }
     // release delayed commands wake lock before quitting
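
The INT64_MAX sentinel is replaced by -1 because a huge relative timeout gets added to
the current time inside waitRelative(), which can overflow; -1 now explicitly selects an
untimed wait. A sketch of the same split using std::condition_variable (illustrative,
not the Android Condition API):

    #include <mutex>
    #include <chrono>
    #include <condition_variable>

    static void waitForWork(std::condition_variable& cv,
                            std::unique_lock<std::mutex>& lk, long long waitNs) {
        if (waitNs == -1) {
            cv.wait(lk);   // no pending delayed command: block indefinitely
        } else {
            cv.wait_for(lk, std::chrono::nanoseconds(waitNs));  // bounded wait
        }
    }
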
@@ -822,6 +872,26 @@
     sendCommand(command);
 }
 
+void AudioPolicyService::AudioCommandThread::recordingConfigurationUpdateCommand(
+        int event, audio_session_t session, audio_source_t source,
+        const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+        audio_patch_handle_t patchHandle)
+{
+    sp<AudioCommand> command = new AudioCommand();
+    command->mCommand = RECORDING_CONFIGURATION_UPDATE;
+    RecordingConfigurationUpdateData *data = new RecordingConfigurationUpdateData();
+    data->mEvent = event;
+    data->mSession = session;
+    data->mSource = source;
+    data->mClientConfig = *clientConfig;
+    data->mDeviceConfig = *deviceConfig;
+    data->mPatchHandle = patchHandle;
+    command->mParam = data;
+    ALOGV("AudioCommandThread() adding recording configuration update event %d, source %d",
+            event, source);
+    sendCommand(command);
+}
+
 status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
 {
     {
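
recordingConfigurationUpdateCommand() follows the file's command pattern: marshal the
arguments into a payload object, attach it to an AudioCommand, and post it to the command
thread so the binder caller never blocks on the downstream notification. A generic sketch
of that shape, with illustrative types:

    #include <deque>
    #include <memory>
    #include <mutex>

    struct Command {
        int what;                        // e.g. RECORDING_CONFIGURATION_UPDATE
        std::shared_ptr<void> param;     // owning pointer to the payload
    };

    struct CommandQueue {
        std::mutex lock;
        std::deque<Command> pending;

        void post(int what, std::shared_ptr<void> param) {
            std::lock_guard<std::mutex> _l(lock);
            pending.push_back({what, std::move(param)});
            // the real code also signals the worker's condition variable here
        }
    };
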
@@ -968,6 +1038,10 @@
 
         } break;
 
+        case RECORDING_CONFIGURATION_UPDATE: {
+
+        } break;
+
         case START_TONE:
         case STOP_TONE:
         default:
@@ -1108,7 +1182,7 @@
                                                   audio_channel_mask_t *pChannelMask);
 int aps_close_input(void *service __unused, audio_io_handle_t input);
 int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream);
-int aps_move_effects(void *service __unused, int session,
+int aps_move_effects(void *service __unused, audio_session_t session,
                                 audio_io_handle_t src_output,
                                 audio_io_handle_t dst_output);
 char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle,
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index a0d5aa2..0b2cb35 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -99,6 +99,7 @@
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
+                                     pid_t pid,
                                      uid_t uid,
                                      uint32_t samplingRate,
                                      audio_format_t format,
@@ -128,7 +129,7 @@
     virtual status_t registerEffect(const effect_descriptor_t *desc,
                                     audio_io_handle_t io,
                                     uint32_t strategy,
-                                    int session,
+                                    audio_session_t session,
                                     int id);
     virtual status_t unregisterEffect(int id);
     virtual status_t setEffectEnabled(int id, bool enabled);
@@ -136,7 +137,7 @@
     virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
     virtual bool isSourceActive(audio_source_t source) const;
 
-    virtual status_t queryDefaultPreProcessing(int audioSession,
+    virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count);
     virtual     status_t    onTransact(
@@ -202,6 +203,9 @@
                                       audio_io_handle_t *handle);
     virtual status_t stopAudioSource(audio_io_handle_t handle);
 
+    virtual status_t setMasterMono(bool mono);
+    virtual status_t getMasterMono(bool *mono);
+
             status_t doStopOutput(audio_io_handle_t output,
                                   audio_stream_type_t stream,
                                   audio_session_t session);
@@ -225,6 +229,12 @@
 
             void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
             void doOnDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+            void onRecordingConfigurationUpdate(int event, audio_session_t session,
+                    audio_source_t source, const audio_config_base_t *clientConfig,
+                    const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
+            void doOnRecordingConfigurationUpdate(int event, audio_session_t session,
+                    audio_source_t source, const audio_config_base_t *clientConfig,
+                    const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
 
 private:
                         AudioPolicyService() ANDROID_API;
@@ -256,7 +266,8 @@
             UPDATE_AUDIOPORT_LIST,
             UPDATE_AUDIOPATCH_LIST,
             SET_AUDIOPORT_CONFIG,
-            DYN_POLICY_MIX_STATE_UPDATE
+            DYN_POLICY_MIX_STATE_UPDATE,
+            RECORDING_CONFIGURATION_UPDATE
         };
 
         AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
@@ -295,6 +306,12 @@
                     status_t    setAudioPortConfigCommand(const struct audio_port_config *config,
                                                           int delayMs);
                     void        dynamicPolicyMixStateUpdateCommand(String8 regId, int32_t state);
+                    void        recordingConfigurationUpdateCommand(
+                                                        int event, audio_session_t session,
+                                                        audio_source_t source,
+                                                        const audio_config_base_t *clientConfig,
+                                                        const audio_config_base_t *deviceConfig,
+                                                        audio_patch_handle_t patchHandle);
                     void        insertCommand_l(AudioCommand *command, int delayMs = 0);
 
     private:
@@ -385,6 +402,16 @@
             int32_t mState;
         };
 
+        class RecordingConfigurationUpdateData : public AudioCommandData {
+        public:
+            int mEvent;
+            audio_session_t mSession;
+            audio_source_t mSource;
+            struct audio_config_base mClientConfig;
+            struct audio_config_base mDeviceConfig;
+            audio_patch_handle_t mPatchHandle;
+        };
+
         Mutex   mLock;
         Condition mWaitWorkCV;
         Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
@@ -472,7 +499,7 @@
         virtual status_t setVoiceVolume(float volume, int delayMs = 0);
 
         // move effect to the specified output
-        virtual status_t moveEffects(int session,
+        virtual status_t moveEffects(audio_session_t session,
                                          audio_io_handle_t srcOutput,
                                          audio_io_handle_t dstOutput);
 
@@ -491,8 +518,12 @@
         virtual void onAudioPortListUpdate();
         virtual void onAudioPatchListUpdate();
         virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+        virtual void onRecordingConfigurationUpdate(int event,
+                        audio_session_t session, audio_source_t source,
+                        const audio_config_base_t *clientConfig,
+                        const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
 
-        virtual audio_unique_id_t newAudioUniqueId();
+        virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
 
      private:
         AudioPolicyService *mAudioPolicyService;
@@ -509,6 +540,12 @@
                             void      onAudioPortListUpdate();
                             void      onAudioPatchListUpdate();
                             void      onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+                            void      onRecordingConfigurationUpdate(
+                                        int event, audio_session_t session,
+                                        audio_source_t source,
+                                        const audio_config_base_t *clientConfig,
+                                        const audio_config_base_t *deviceConfig,
+                                        audio_patch_handle_t patchHandle);
                             void      setAudioPortCallbacksEnabled(bool enabled);
 
                 // IBinder::DeathRecipient
@@ -530,6 +567,10 @@
 
     mutable Mutex mLock;    // prevents concurrent access to AudioPolicy manager functions changing
                             // device connection state  or routing
+    mutable Mutex mEffectsLock; // serializes access to effect state within APM.
+    // Note: lock acquisition order is always mLock > mEffectsLock:
+    // mLock protects AudioPolicyManager methods that can call into audio flinger
+    // and possibly back into audio policy service and acquire mEffectsLock.
     sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
     sp<AudioCommandThread> mTonePlaybackThread;     // tone playback thread
     sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
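
A standalone sketch of the documented ordering: any path that needs both locks must take
mLock first, while mEffectsLock alone is always safe. Illustrative std:: types, not the
Android Mutex class:

    #include <mutex>

    struct Policy {
        std::mutex mLock;         // outer: routing / manager state
        std::mutex mEffectsLock;  // inner: effect registration state

        void needsBoth() {                                // e.g. isOffloadSupported()
            std::lock_guard<std::mutex> l1(mLock);        // always outer first...
            std::lock_guard<std::mutex> l2(mEffectsLock); // ...then inner
        }
        void effectsOnly() {                              // e.g. registerEffect()
            std::lock_guard<std::mutex> l(mEffectsLock);  // never takes mLock afterwards
        }
    };
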
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 45900c4..ebe65e4 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -20,9 +20,10 @@
 
 include $(CLEAR_VARS)
 
-LOCAL_SRC_FILES:=               \
+# Camera service source
+
+LOCAL_SRC_FILES :=  \
     CameraService.cpp \
-    CameraDeviceFactory.cpp \
     CameraFlashlight.cpp \
     common/Camera2ClientBase.cpp \
     common/CameraDeviceBase.cpp \
@@ -35,14 +36,10 @@
     api1/client2/StreamingProcessor.cpp \
     api1/client2/JpegProcessor.cpp \
     api1/client2/CallbackProcessor.cpp \
-    api1/client2/ZslProcessor.cpp \
-    api1/client2/ZslProcessorInterface.cpp \
-    api1/client2/BurstCapture.cpp \
     api1/client2/JpegCompressor.cpp \
     api1/client2/CaptureSequencer.cpp \
-    api1/client2/ZslProcessor3.cpp \
+    api1/client2/ZslProcessor.cpp \
     api2/CameraDeviceClient.cpp \
-    device2/Camera2Device.cpp \
     device3/Camera3Device.cpp \
     device3/Camera3Stream.cpp \
     device3/Camera3IOStreamBase.cpp \
@@ -51,6 +48,7 @@
     device3/Camera3ZslStream.cpp \
     device3/Camera3DummyStream.cpp \
     device3/StatusTracker.cpp \
+    device3/Camera3BufferManager.cpp \
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
     utils/AutoConditionLock.cpp
@@ -68,16 +66,18 @@
     libhardware \
     libsync \
     libcamera_metadata \
-    libjpeg
+    libjpeg \
+    libmemunreachable
 
 LOCAL_C_INCLUDES += \
-    system/media/camera/include \
     system/media/private/camera/include \
     frameworks/native/include/media/openmax \
     external/jpeg
 
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+    frameworks/av/services/camera/libcameraservice
 
-LOCAL_CFLAGS += -Wall -Wextra
+LOCAL_CFLAGS += -Wall -Wextra -Werror
 
 LOCAL_MODULE:= libcameraservice
 
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.cpp b/services/camera/libcameraservice/CameraDeviceFactory.cpp
deleted file mode 100644
index 6589e27..0000000
--- a/services/camera/libcameraservice/CameraDeviceFactory.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "CameraDeviceFactory"
-#include <utils/Log.h>
-
-#include "CameraService.h"
-#include "CameraDeviceFactory.h"
-#include "common/CameraDeviceBase.h"
-#include "device2/Camera2Device.h"
-#include "device3/Camera3Device.h"
-
-namespace android {
-
-wp<CameraService> CameraDeviceFactory::sService;
-
-sp<CameraDeviceBase> CameraDeviceFactory::createDevice(int cameraId) {
-
-    sp<CameraService> svc = sService.promote();
-    if (svc == 0) {
-        ALOGE("%s: No service registered", __FUNCTION__);
-        return NULL;
-    }
-
-    int deviceVersion = svc->getDeviceVersion(cameraId, /*facing*/NULL);
-
-    sp<CameraDeviceBase> device;
-
-    switch (deviceVersion) {
-        case CAMERA_DEVICE_API_VERSION_2_0:
-        case CAMERA_DEVICE_API_VERSION_2_1:
-            device = new Camera2Device(cameraId);
-            break;
-        case CAMERA_DEVICE_API_VERSION_3_0:
-        case CAMERA_DEVICE_API_VERSION_3_1:
-        case CAMERA_DEVICE_API_VERSION_3_2:
-        case CAMERA_DEVICE_API_VERSION_3_3:
-            device = new Camera3Device(cameraId);
-            break;
-        default:
-            ALOGE("%s: Camera %d: Unknown HAL device version %d",
-                  __FUNCTION__, cameraId, deviceVersion);
-            device = NULL;
-            break;
-    }
-
-    ALOGV_IF(device != 0, "Created a new camera device for version %d",
-                          deviceVersion);
-
-    return device;
-}
-
-void CameraDeviceFactory::registerService(wp<CameraService> service) {
-    ALOGV("%s: Registered service %p", __FUNCTION__,
-          service.promote().get());
-
-    sService = service;
-}
-
-}; // namespace android
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.h b/services/camera/libcameraservice/CameraDeviceFactory.h
deleted file mode 100644
index 236dc56..0000000
--- a/services/camera/libcameraservice/CameraDeviceFactory.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEFACTORY_H
-#define ANDROID_SERVERS_CAMERA_CAMERADEVICEFACTORY_H
-
-#include <utils/RefBase.h>
-
-namespace android {
-
-class CameraDeviceBase;
-class CameraService;
-
-/**
- * Create the right instance of Camera2Device or Camera3Device
- * automatically based on the device version.
- */
-class CameraDeviceFactory : public virtual RefBase {
-  public:
-    static void registerService(wp<CameraService> service);
-
-    // Prerequisite: Call registerService.
-    static sp<CameraDeviceBase> createDevice(int cameraId);
-  private:
-    CameraDeviceFactory(wp<CameraService> service);
-
-    static wp<CameraService> sService;
-};
-
-}; // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index 406c1c4..ad08a68 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -27,7 +27,7 @@
 #include "gui/IGraphicBufferConsumer.h"
 #include "gui/BufferQueue.h"
 #include "camera/camera2/CaptureRequest.h"
-#include "CameraDeviceFactory.h"
+#include "device3/Camera3Device.h"
 
 
 namespace android {
@@ -78,7 +78,7 @@
             deviceVersion = info.device_version;
         }
 
-        if (deviceVersion >= CAMERA_DEVICE_API_VERSION_2_0) {
+        if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
             CameraDeviceClientFlashControl *flashControl =
                     new CameraDeviceClientFlashControl(*mCameraModule,
                                                        *mCallbacks);
@@ -193,8 +193,6 @@
 }
 
 bool CameraFlashlight::hasFlashUnit(const String8& cameraId) {
-    status_t res;
-
     Mutex::Autolock l(mLock);
     return hasFlashUnitLocked(cameraId);
 }
@@ -302,7 +300,8 @@
 /////////////////////////////////////////////////////////////////////
 ModuleFlashControl::ModuleFlashControl(CameraModule& cameraModule,
         const camera_module_callbacks_t& callbacks) :
-    mCameraModule(&cameraModule) {
+        mCameraModule(&cameraModule) {
+    (void) callbacks;
 }
 
 ModuleFlashControl::~ModuleFlashControl() {
@@ -478,7 +477,7 @@
     }
 
     sp<CameraDeviceBase> device =
-            CameraDeviceFactory::createDevice(atoi(cameraId.string()));
+            new Camera3Device(atoi(cameraId.string()));
     if (device == NULL) {
         return NO_MEMORY;
     }
@@ -680,7 +679,8 @@
     status_t res;
     if (enabled) {
         bool hasFlash = false;
-        res = hasFlashUnitLocked(cameraId, &hasFlash);
+        // Check whether it has a flash unit, and leave the camera device open.
+        res = hasFlashUnitLocked(cameraId, &hasFlash, /*keepDeviceOpen*/true);
         // invalid camera?
         if (res) {
             // hasFlashUnitLocked() returns BAD_INDEX if mDevice is connected to
@@ -689,6 +689,8 @@
         }
         // no flash unit?
         if (!hasFlash) {
+            // Disconnect camera device if it has no flash.
+            disconnectCameraDevice();
             return -ENOSYS;
         }
     } else if (mDevice == NULL || cameraId != mCameraId) {
@@ -717,21 +719,28 @@
 status_t CameraHardwareInterfaceFlashControl::hasFlashUnit(
         const String8& cameraId, bool *hasFlash) {
     Mutex::Autolock l(mLock);
-    return hasFlashUnitLocked(cameraId, hasFlash);
+    // Close device after checking if it has a flash unit.
+    return hasFlashUnitLocked(cameraId, hasFlash, /*keepDeviceOpen*/false);
 }
 
 status_t CameraHardwareInterfaceFlashControl::hasFlashUnitLocked(
-        const String8& cameraId, bool *hasFlash) {
+        const String8& cameraId, bool *hasFlash, bool keepDeviceOpen) {
+    bool closeCameraDevice = false;
+
     if (!hasFlash) {
         return BAD_VALUE;
     }
 
     status_t res;
     if (mDevice == NULL) {
+        // Connect to camera device to query if it has a flash unit.
         res = connectCameraDevice(cameraId);
         if (res) {
             return res;
         }
+        // Close the camera device only when it was just opened here and the caller
+        // doesn't want to keep it open.
+        closeCameraDevice = !keepDeviceOpen;
     }
 
     if (cameraId != mCameraId) {
@@ -746,6 +755,15 @@
         *hasFlash = false;
     }
 
+    if (closeCameraDevice) {
+        res = disconnectCameraDevice();
+        if (res != OK) {
+            ALOGE("%s: Failed to disconnect camera device. %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
     return OK;
 }
 
@@ -870,9 +888,13 @@
         return OK;
     }
 
-    mParameters.set(CameraParameters::KEY_FLASH_MODE,
-            CameraParameters::FLASH_MODE_OFF);
-    mDevice->setParameters(mParameters);
+    if (mParameters.get(CameraParameters::KEY_FLASH_MODE)) {
+        // There is a flash, turn it off.
+        // (If there isn't one, leave the parameter null)
+        mParameters.set(CameraParameters::KEY_FLASH_MODE,
+                CameraParameters::FLASH_MODE_OFF);
+        mDevice->setParameters(mParameters);
+    }
     mDevice->stopPreview();
     status_t res = native_window_api_disconnect(mSurface.get(),
             NATIVE_WINDOW_API_CAMERA);
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
index 4d5fe8d..5cde372 100644
--- a/services/camera/libcameraservice/CameraFlashlight.h
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -203,7 +203,11 @@
         status_t getSmallestSurfaceSize(int32_t *width, int32_t *height);
 
         // protected by mLock
-        status_t hasFlashUnitLocked(const String8& cameraId, bool *hasFlash);
+        // If this function opens the camera device to check whether it has a flash unit, the
+        // device remains open when keepDeviceOpen is true and is closed when keepDeviceOpen
+        // is false. If the camera device is already open when this function is called,
+        // keepDeviceOpen is ignored.
+        status_t hasFlashUnitLocked(const String8& cameraId, bool *hasFlash, bool keepDeviceOpen);
 
         CameraModule *mCameraModule;
         const camera_module_callbacks_t *mCallbacks;
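
Hypothetical call sites for the new parameter, assuming mLock is held, to show the
contract described in the comment above:

    bool hasFlash = false;

    // One-off query: open the device if needed, then close it again.
    status_t res = hasFlashUnitLocked(cameraId, &hasFlash, /*keepDeviceOpen*/ false);

    // Torch is about to be enabled: keep the device that was just opened.
    res = hasFlashUnitLocked(cameraId, &hasFlash, /*keepDeviceOpen*/ true);
    if (res == OK && !hasFlash) {
        disconnectCameraDevice();   // no flash after all; release the device
    }
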
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index a560b93..ff73c28 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -28,6 +28,9 @@
 #include <inttypes.h>
 #include <pthread.h>
 
+#include <android/hardware/ICamera.h>
+#include <android/hardware/ICameraClient.h>
+
 #include <binder/AppOpsManager.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
@@ -38,6 +41,7 @@
 #include <cutils/properties.h>
 #include <gui/Surface.h>
 #include <hardware/hardware.h>
+#include <memunreachable/memunreachable.h>
 #include <media/AudioSystem.h>
 #include <media/IMediaHTTPService.h>
 #include <media/mediaplayer.h>
@@ -46,6 +50,7 @@
 #include <utils/Log.h>
 #include <utils/String16.h>
 #include <utils/Trace.h>
+#include <private/android_filesystem_config.h>
 #include <system/camera_vendor_tags.h>
 #include <system/camera_metadata.h>
 #include <system/camera.h>
@@ -55,10 +60,16 @@
 #include "api1/Camera2Client.h"
 #include "api2/CameraDeviceClient.h"
 #include "utils/CameraTraces.h"
-#include "CameraDeviceFactory.h"
+
+namespace {
+    const char* kPermissionServiceName = "permission";
+}; // namespace anonymous
 
 namespace android {
 
+using binder::Status;
+using namespace hardware;
+
 // ----------------------------------------------------------------------------
 // Logging support -- this is for debugging only
 // Use "adb shell dumpsys media.camera -v 1" to change it.
@@ -71,6 +82,17 @@
     android_atomic_write(level, &gLogLevel);
 }
 
+// Convenience methods for constructing binder::Status objects for error returns
+
+#define STATUS_ERROR(errorCode, errorString) \
+    binder::Status::fromServiceSpecificError(errorCode, \
+            String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
+
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+    binder::Status::fromServiceSpecificError(errorCode, \
+            String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, \
+                    __VA_ARGS__))
+
 // ----------------------------------------------------------------------------
 
 extern "C" {
@@ -96,7 +118,7 @@
     sp<CameraService> cs = const_cast<CameraService*>(
                                 static_cast<const CameraService*>(callbacks));
 
-    ICameraServiceListener::TorchStatus status;
+    int32_t status;
     switch (new_status) {
         case TORCH_MODE_STATUS_NOT_AVAILABLE:
             status = ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
@@ -124,8 +146,10 @@
 // should be ok for now.
 static CameraService *gCameraService;
 
-CameraService::CameraService() : mEventLog(DEFAULT_EVENT_LOG_LENGTH), mAllowedUsers(),
-        mSoundRef(0), mModule(0), mFlashlight(0) {
+CameraService::CameraService() :
+        mEventLog(DEFAULT_EVENT_LOG_LENGTH),
+        mNumberOfCameras(0), mNumberOfNormalCameras(0),
+        mSoundRef(0), mModule(nullptr) {
     ALOGI("CameraService started (pid=%d)", getpid());
     gCameraService = this;
 
@@ -152,8 +176,6 @@
     if (err < 0) {
         ALOGE("Could not load camera HAL module: %d (%s)", err, strerror(-err));
         logServiceError("Could not load camera HAL module", err);
-        mNumberOfCameras = 0;
-        mNumberOfNormalCameras = 0;
         return;
     }
 
@@ -164,7 +186,6 @@
             strerror(-err));
         logServiceError("Could not initialize camera HAL module", err);
 
-        mNumberOfCameras = 0;
         delete mModule;
         mModule = nullptr;
         return;
@@ -247,8 +268,6 @@
         mModule->setCallbacks(this);
     }
 
-    CameraDeviceFactory::registerService(this);
-
     CameraService::pingCameraServiceProxy();
 }
 
@@ -290,9 +309,9 @@
         return;
     }
 
-    ICameraServiceListener::Status oldStatus = state->getStatus();
+    int32_t oldStatus = state->getStatus();
 
-    if (oldStatus == static_cast<ICameraServiceListener::Status>(newStatus)) {
+    if (oldStatus == static_cast<int32_t>(newStatus)) {
         ALOGE("%s: State transition to the same status %#x not allowed", __FUNCTION__, newStatus);
         return;
     }
@@ -316,10 +335,9 @@
             clientToDisconnect = removeClientLocked(id);
 
             // Notify the client of disconnection
-            if (clientToDisconnect != nullptr) {
-                clientToDisconnect->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
-                        CaptureResultExtras{});
-            }
+            clientToDisconnect->notifyError(
+                    hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                    CaptureResultExtras{});
         }
 
         ALOGI("%s: Client for camera ID %s evicted due to device status change from HAL",
@@ -334,27 +352,27 @@
         }
 
     } else {
-        if (oldStatus == ICameraServiceListener::Status::STATUS_NOT_PRESENT) {
+        if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
             logDeviceAdded(id, String8::format("Device status changed from %d to %d", oldStatus,
                     newStatus));
         }
-        updateStatus(static_cast<ICameraServiceListener::Status>(newStatus), id);
+        updateStatus(static_cast<int32_t>(newStatus), id);
     }
 
 }
 
 void CameraService::onTorchStatusChanged(const String8& cameraId,
-        ICameraServiceListener::TorchStatus newStatus) {
+        int32_t newStatus) {
     Mutex::Autolock al(mTorchStatusMutex);
     onTorchStatusChangedLocked(cameraId, newStatus);
 }
 
 void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
-        ICameraServiceListener::TorchStatus newStatus) {
+        int32_t newStatus) {
     ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
             __FUNCTION__, cameraId.string(), newStatus);
 
-    ICameraServiceListener::TorchStatus status;
+    int32_t status;
     status_t res = getTorchStatusLocked(cameraId, &status);
     if (res) {
         ALOGE("%s: cannot get torch status of camera %s: %s (%d)",
@@ -367,7 +385,8 @@
 
     res = setTorchStatusLocked(cameraId, newStatus);
     if (res) {
-        ALOGE("%s: Failed to set the torch status", __FUNCTION__, (uint32_t)newStatus);
+        ALOGE("%s: Failed to set the torch status to %d: %s (%d)", __FUNCTION__,
+                (uint32_t)newStatus, strerror(-res), res);
         return;
     }
 
@@ -407,41 +426,51 @@
     }
 }
 
-int32_t CameraService::getNumberOfCameras() {
-    ATRACE_CALL();
-    return getNumberOfCameras(CAMERA_TYPE_BACKWARD_COMPATIBLE);
-}
-
-int32_t CameraService::getNumberOfCameras(int type) {
+Status CameraService::getNumberOfCameras(int32_t type, int32_t* numCameras) {
     ATRACE_CALL();
     switch (type) {
         case CAMERA_TYPE_BACKWARD_COMPATIBLE:
-            return mNumberOfNormalCameras;
+            *numCameras = mNumberOfNormalCameras;
+            break;
         case CAMERA_TYPE_ALL:
-            return mNumberOfCameras;
+            *numCameras = mNumberOfCameras;
+            break;
         default:
-            ALOGW("%s: Unknown camera type %d, returning 0",
+            ALOGW("%s: Unknown camera type %d",
                     __FUNCTION__, type);
-            return 0;
+            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                    "Unknown camera type %d", type);
     }
+    return Status::ok();
 }
 
-status_t CameraService::getCameraInfo(int cameraId,
-                                      struct CameraInfo* cameraInfo) {
+Status CameraService::getCameraInfo(int cameraId,
+        CameraInfo* cameraInfo) {
     ATRACE_CALL();
     if (!mModule) {
-        return -ENODEV;
+        return STATUS_ERROR(ERROR_DISCONNECTED,
+                "Camera subsystem is not available");
     }
 
     if (cameraId < 0 || cameraId >= mNumberOfCameras) {
-        return BAD_VALUE;
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+                "CameraId is not valid");
     }
 
     struct camera_info info;
-    status_t rc = filterGetInfoErrorCode(
+    Status rc = filterGetInfoErrorCode(
         mModule->getCameraInfo(cameraId, &info));
-    cameraInfo->facing = info.facing;
-    cameraInfo->orientation = info.orientation;
+
+    if (rc.isOk()) {
+        cameraInfo->facing = info.facing;
+        cameraInfo->orientation = info.orientation;
+        // CameraInfo is for android.hardware.Camera which does not
+        // support external camera facing. The closest approximation would be
+        // front camera.
+        if (cameraInfo->facing == CAMERA_FACING_EXTERNAL) {
+            cameraInfo->facing = CAMERA_FACING_FRONT;
+        }
+    }
     return rc;
 }
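
This is the calling convention the file migrates to: AIDL-generated methods return
binder::Status for the error, and deliver results through out parameters. A hypothetical
client (assuming cameraService is a bound ICameraService proxy):

    int32_t numCameras = 0;
    binder::Status status = cameraService->getNumberOfCameras(
            CAMERA_TYPE_BACKWARD_COMPATIBLE, &numCameras);
    if (!status.isOk()) {
        ALOGE("getNumberOfCameras failed: %s", status.toString8().string());
    }
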
 
@@ -455,28 +484,33 @@
     return ret;
 }
 
-status_t CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo) {
+Status CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo) {
     ATRACE_CALL();
-    status_t ret = OK;
+
+    Status ret = Status::ok();
+
     struct CameraInfo info;
-    if ((ret = getCameraInfo(cameraId, &info)) != OK) {
+    if (!(ret = getCameraInfo(cameraId, &info)).isOk()) {
         return ret;
     }
 
     CameraMetadata shimInfo;
     int32_t orientation = static_cast<int32_t>(info.orientation);
-    if ((ret = shimInfo.update(ANDROID_SENSOR_ORIENTATION, &orientation, 1)) != OK) {
-        return ret;
+    status_t rc;
+    if ((rc = shimInfo.update(ANDROID_SENSOR_ORIENTATION, &orientation, 1)) != OK) {
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                "Error updating metadata: %d (%s)", rc, strerror(-rc));
     }
 
     uint8_t facing = (info.facing == CAMERA_FACING_FRONT) ?
             ANDROID_LENS_FACING_FRONT : ANDROID_LENS_FACING_BACK;
-    if ((ret = shimInfo.update(ANDROID_LENS_FACING, &facing, 1)) != OK) {
-        return ret;
+    if ((rc = shimInfo.update(ANDROID_LENS_FACING, &facing, 1)) != OK) {
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                "Error updating metadata: %d (%s)", rc, strerror(-rc));
     }
 
     CameraParameters shimParams;
-    if ((ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)) != OK) {
+    if (!(ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)).isOk()) {
         // Error logged by callee
         return ret;
     }
@@ -484,7 +518,6 @@
     Vector<Size> sizes;
     Vector<Size> jpegSizes;
     Vector<int32_t> formats;
-    const char* supportedPreviewFormats;
     {
         shimParams.getSupportedPreviewSizes(/*out*/sizes);
         shimParams.getSupportedPreviewFormats(/*out*/formats);
@@ -518,51 +551,56 @@
         streamConfigs.add(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
     }
 
-    if ((ret = shimInfo.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+    if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
             streamConfigs.array(), streamConfigSize)) != OK) {
-        return ret;
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                "Error updating metadata: %d (%s)", rc, strerror(-rc));
     }
 
     int64_t fakeMinFrames[0];
     // TODO: Fixme, don't fake min frame durations.
-    if ((ret = shimInfo.update(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+    if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
             fakeMinFrames, 0)) != OK) {
-        return ret;
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                "Error updating metadata: %d (%s)", rc, strerror(-rc));
     }
 
     int64_t fakeStalls[0];
     // TODO: Fixme, don't fake stall durations.
-    if ((ret = shimInfo.update(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+    if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
             fakeStalls, 0)) != OK) {
-        return ret;
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                "Error updating metadata: %d (%s)", rc, strerror(-rc));
     }
 
     *cameraInfo = shimInfo;
-    return OK;
+    return ret;
 }
 
-status_t CameraService::getCameraCharacteristics(int cameraId,
+Status CameraService::getCameraCharacteristics(int cameraId,
                                                 CameraMetadata* cameraInfo) {
     ATRACE_CALL();
     if (!cameraInfo) {
         ALOGE("%s: cameraInfo is NULL", __FUNCTION__);
-        return BAD_VALUE;
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "cameraInfo is NULL");
     }
 
     if (!mModule) {
         ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
-        return -ENODEV;
+        return STATUS_ERROR(ERROR_DISCONNECTED,
+                "Camera subsystem is not available");;
     }
 
     if (cameraId < 0 || cameraId >= mNumberOfCameras) {
         ALOGE("%s: Invalid camera id: %d", __FUNCTION__, cameraId);
-        return BAD_VALUE;
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                "Invalid camera id: %d", cameraId);
     }
 
     int facing;
-    status_t ret = OK;
+    Status ret;
     if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0 ||
-            getDeviceVersion(cameraId, &facing) <= CAMERA_DEVICE_API_VERSION_2_1 ) {
+            getDeviceVersion(cameraId, &facing) < CAMERA_DEVICE_API_VERSION_3_0) {
         /**
          * Backwards compatibility mode for old HALs:
          * - Convert CameraInfo into static CameraMetadata properties.
@@ -573,17 +611,16 @@
          */
         ALOGI("%s: Switching to HAL1 shim implementation...", __FUNCTION__);
 
-        if ((ret = generateShimMetadata(cameraId, cameraInfo)) != OK) {
-            return ret;
-        }
-
+        ret = generateShimMetadata(cameraId, cameraInfo);
     } else {
         /**
          * Normal HAL 2.1+ codepath.
          */
         struct camera_info info;
         ret = filterGetInfoErrorCode(mModule->getCameraInfo(cameraId, &info));
-        *cameraInfo = info.static_camera_characteristics;
+        if (ret.isOk()) {
+            *cameraInfo = info.static_camera_characteristics;
+        }
     }
 
     return ret;
@@ -620,15 +657,19 @@
     return INT_MAX - procState;
 }
 
-status_t CameraService::getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
+Status CameraService::getCameraVendorTagDescriptor(
+        /*out*/
+        hardware::camera2::params::VendorTagDescriptor* desc) {
     ATRACE_CALL();
     if (!mModule) {
         ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
-        return -ENODEV;
+        return STATUS_ERROR(ERROR_DISCONNECTED, "Camera subsystem not available");
     }
-
-    desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
-    return OK;
+    sp<VendorTagDescriptor> globalDescriptor = VendorTagDescriptor::getGlobalVendorTagDescriptor();
+    if (globalDescriptor != nullptr) {
+        *desc = *(globalDescriptor.get());
+    }
+    return Status::ok();
 }
 
 int CameraService::getDeviceVersion(int cameraId, int* facing) {
@@ -652,15 +693,21 @@
     return deviceVersion;
 }
 
-status_t CameraService::filterGetInfoErrorCode(status_t err) {
+Status CameraService::filterGetInfoErrorCode(status_t err) {
     switch(err) {
         case NO_ERROR:
+            return Status::ok();
         case -EINVAL:
-            return err;
+            return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+                    "CameraId is not valid for HAL module");
+        case -ENODEV:
+            return STATUS_ERROR(ERROR_DISCONNECTED,
+                    "Camera device not available");
         default:
-            break;
+            return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                    "Camera HAL encountered error %d: %s",
+                    err, strerror(-err));
     }
-    return -ENODEV;
 }
 
 bool CameraService::setUpVendorTags() {
@@ -700,20 +747,12 @@
     return true;
 }
 
-status_t CameraService::makeClient(const sp<CameraService>& cameraService,
-        const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
+Status CameraService::makeClient(const sp<CameraService>& cameraService,
+        const sp<IInterface>& cameraCb, const String16& packageName, int cameraId,
         int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
         int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
         /*out*/sp<BasicClient>* client) {
 
-    // TODO: Update CameraClients + HAL interface to use strings for Camera IDs
-    int id = cameraIdToInt(cameraId);
-    if (id == -1) {
-        ALOGE("%s: Invalid camera ID %s, cannot convert to integer.", __FUNCTION__,
-                cameraId.string());
-        return BAD_VALUE;
-    }
-
     if (halVersion < 0 || halVersion == deviceVersion) {
         // Default path: HAL version is unspecified by caller, create CameraClient
         // based on device version reported by the HAL.
@@ -721,34 +760,37 @@
           case CAMERA_DEVICE_API_VERSION_1_0:
             if (effectiveApiLevel == API_1) {  // Camera1 API route
                 sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
-                *client = new CameraClient(cameraService, tmp, packageName, id, facing,
+                *client = new CameraClient(cameraService, tmp, packageName, cameraId, facing,
                         clientPid, clientUid, getpid(), legacyMode);
             } else { // Camera2 API route
                 ALOGW("Camera using old HAL version: %d", deviceVersion);
-                return -EOPNOTSUPP;
+                return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
+                        "Camera device \"%d\" HAL version %d does not support camera2 API",
+                        cameraId, deviceVersion);
             }
             break;
-          case CAMERA_DEVICE_API_VERSION_2_0:
-          case CAMERA_DEVICE_API_VERSION_2_1:
           case CAMERA_DEVICE_API_VERSION_3_0:
           case CAMERA_DEVICE_API_VERSION_3_1:
           case CAMERA_DEVICE_API_VERSION_3_2:
           case CAMERA_DEVICE_API_VERSION_3_3:
+          case CAMERA_DEVICE_API_VERSION_3_4:
             if (effectiveApiLevel == API_1) { // Camera1 API route
                 sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
-                *client = new Camera2Client(cameraService, tmp, packageName, id, facing,
+                *client = new Camera2Client(cameraService, tmp, packageName, cameraId, facing,
                         clientPid, clientUid, servicePid, legacyMode);
             } else { // Camera2 API route
-                sp<ICameraDeviceCallbacks> tmp =
-                        static_cast<ICameraDeviceCallbacks*>(cameraCb.get());
-                *client = new CameraDeviceClient(cameraService, tmp, packageName, id,
+                sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
+                        static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
+                *client = new CameraDeviceClient(cameraService, tmp, packageName, cameraId,
                         facing, clientPid, clientUid, servicePid);
             }
             break;
           default:
             // Should not be reachable
             ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-            return INVALID_OPERATION;
+            return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                    "Camera device \"%d\" has unknown HAL version %d",
+                    cameraId, deviceVersion);
         }
     } else {
         // A particular HAL version is requested by caller. Create CameraClient
@@ -757,17 +799,19 @@
             halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
             // Only support higher HAL version device opened as HAL1.0 device.
             sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
-            *client = new CameraClient(cameraService, tmp, packageName, id, facing,
+            *client = new CameraClient(cameraService, tmp, packageName, cameraId, facing,
                     clientPid, clientUid, servicePid, legacyMode);
         } else {
             // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
             ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
                     " opened as HAL %x device", halVersion, deviceVersion,
                     CAMERA_DEVICE_API_VERSION_1_0);
-            return INVALID_OPERATION;
+            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                    "Camera device \"%d\" (HAL version %d) cannot be opened as HAL version %d",
+                    cameraId, deviceVersion, halVersion);
         }
     }
-    return NO_ERROR;
+    return Status::ok();
 }
 
 String8 CameraService::toString(std::set<userid_t> intSet) {
@@ -784,33 +828,35 @@
     return s;
 }
 
-status_t CameraService::initializeShimMetadata(int cameraId) {
+Status CameraService::initializeShimMetadata(int cameraId) {
     int uid = getCallingUid();
 
-    String16 internalPackageName("media");
+    String16 internalPackageName("cameraserver");
     String8 id = String8::format("%d", cameraId);
-    status_t ret = NO_ERROR;
+    Status ret = Status::ok();
     sp<Client> tmp = nullptr;
-    if ((ret = connectHelper<ICameraClient,Client>(sp<ICameraClient>{nullptr}, id,
-            static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED), internalPackageName, uid, API_1,
-            false, true, tmp)) != NO_ERROR) {
-        ALOGE("%s: Error %d (%s) initializing shim metadata.", __FUNCTION__, ret, strerror(ret));
-        return ret;
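+    // Note: shimUpdateOnly = true means connectHelper only populates the cached shim
+    // parameters; the out client (tmp) is intentionally discarded here.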
+    if (!(ret = connectHelper<ICameraClient,Client>(
+            sp<ICameraClient>{nullptr}, id, static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED),
+            internalPackageName, uid, USE_CALLING_PID,
+            API_1, /*legacyMode*/ false, /*shimUpdateOnly*/ true,
+            /*out*/ tmp)
+            ).isOk()) {
+        ALOGE("%s: Error initializing shim metadata: %s", __FUNCTION__, ret.toString8().string());
     }
-    return NO_ERROR;
+    return ret;
 }
 
-status_t CameraService::getLegacyParametersLazy(int cameraId,
+Status CameraService::getLegacyParametersLazy(int cameraId,
         /*out*/
         CameraParameters* parameters) {
 
     ALOGV("%s: for cameraId: %d", __FUNCTION__, cameraId);
 
-    status_t ret = 0;
+    Status ret = Status::ok();
 
     if (parameters == NULL) {
         ALOGE("%s: parameters must not be null", __FUNCTION__);
-        return BAD_VALUE;
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Parameters must not be null");
     }
 
     String8 id = String8::format("%d", cameraId);
@@ -822,19 +868,20 @@
         auto cameraState = getCameraState(id);
         if (cameraState == nullptr) {
             ALOGE("%s: Invalid camera ID: %s", __FUNCTION__, id.string());
-            return BAD_VALUE;
+            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                    "Invalid camera ID: %s", id.string());
         }
         CameraParameters p = cameraState->getShimParams();
         if (!p.isEmpty()) {
             *parameters = p;
-            return NO_ERROR;
+            return ret;
         }
     }
 
     int64_t token = IPCThreadState::self()->clearCallingIdentity();
     ret = initializeShimMetadata(cameraId);
     IPCThreadState::self()->restoreCallingIdentity(token);
-    if (ret != NO_ERROR) {
+    if (!ret.isOk()) {
         // Error already logged by callee
         return ret;
     }
@@ -846,60 +893,94 @@
         auto cameraState = getCameraState(id);
         if (cameraState == nullptr) {
             ALOGE("%s: Invalid camera ID: %s", __FUNCTION__, id.string());
-            return BAD_VALUE;
+            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                    "Invalid camera ID: %s", id.string());
         }
         CameraParameters p = cameraState->getShimParams();
         if (!p.isEmpty()) {
             *parameters = p;
-            return NO_ERROR;
+            return ret;
         }
     }
 
     ALOGE("%s: Parameters were not initialized, or were empty.  Device may not be present.",
             __FUNCTION__);
-    return INVALID_OPERATION;
+    return STATUS_ERROR(ERROR_INVALID_OPERATION, "Unable to initialize legacy parameters");
 }
 
-status_t CameraService::validateConnectLocked(const String8& cameraId, /*inout*/int& clientUid)
-        const {
+// Can camera service trust the caller based on the calling UID?
+static bool isTrustedCallingUid(uid_t uid) {
+    switch (uid) {
+        case AID_MEDIA:         // mediaserver
+        case AID_CAMERASERVER:  // cameraserver
+            return true;
+        default:
+            return false;
+    }
+}
+
+Status CameraService::validateConnectLocked(const String8& cameraId,
+        const String8& clientName8, /*inout*/int& clientUid, /*inout*/int& clientPid,
+        /*out*/int& originalClientPid) const {
 
     int callingPid = getCallingPid();
+    int callingUid = getCallingUid();
 
+    // Check if we can trust clientUid
     if (clientUid == USE_CALLING_UID) {
-        clientUid = getCallingUid();
-    } else {
-        // We only trust our own process to forward client UIDs
-        if (callingPid != getpid()) {
-            ALOGE("CameraService::connect X (PID %d) rejected (don't trust clientUid %d)",
-                    callingPid, clientUid);
-            return PERMISSION_DENIED;
-        }
+        clientUid = callingUid;
+    } else if (!isTrustedCallingUid(callingUid)) {
+        ALOGE("CameraService::connect X (calling PID %d, calling UID %d) rejected "
+                "(don't trust clientUid %d)", callingPid, callingUid, clientUid);
+        return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+                "Untrusted caller (calling PID %d, UID %d) trying to "
+                "forward camera access to camera %s for client %s (PID %d, UID %d)",
+                callingPid, callingUid, cameraId.string(),
+                clientName8.string(), clientPid, clientUid);
     }
 
+    // Check if we can trust clientPid
+    if (clientPid == USE_CALLING_PID) {
+        clientPid = callingPid;
+    } else if (!isTrustedCallingUid(callingUid)) {
+        ALOGE("CameraService::connect X (calling PID %d, calling UID %d) rejected "
+                "(don't trust clientPid %d)", callingPid, callingUid, clientPid);
+        return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+                "Untrusted caller (calling PID %d, UID %d) trying to "
+                "forward camera access to camera %s for client %s (PID %d, UID %d)",
+                callingPid, callingUid, cameraId.string(),
+                clientName8.string(), clientPid, clientUid);
+    }
+
+    // If it's not calling from cameraserver, check the permission.
+    if (callingPid != getpid() &&
+            !checkPermission(String16("android.permission.CAMERA"), clientPid, clientUid)) {
+        ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
+        return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+                "Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" without camera permission",
+                clientName8.string(), clientPid, clientUid, cameraId.string());
+    }
+
+    // Only use the passed-in clientPid to check permission. Use the calling PID as the client PID
+    // that is directly connected to the camera service.
+    originalClientPid = clientPid;
+    clientPid = callingPid;
+
     if (!mModule) {
         ALOGE("CameraService::connect X (PID %d) rejected (camera HAL module not loaded)",
                 callingPid);
-        return -ENODEV;
+        return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                "No camera HAL module available to open camera device \"%s\"", cameraId.string());
     }
 
     if (getCameraState(cameraId) == nullptr) {
         ALOGE("CameraService::connect X (PID %d) rejected (invalid camera ID %s)", callingPid,
                 cameraId.string());
-        return -ENODEV;
+        return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                "No camera device with ID \"%s\" available", cameraId.string());
     }
 
-    // Check device policy for this camera
-    char value[PROPERTY_VALUE_MAX];
-    char key[PROPERTY_KEY_MAX];
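+    // multiuser_get_user_id() maps the client UID to its Android user ID
+    // (each user owns a distinct UID range)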
     userid_t clientUserId = multiuser_get_user_id(clientUid);
-    snprintf(key, PROPERTY_KEY_MAX, "sys.secpolicy.camera.off_%d", clientUserId);
-    property_get(key, value, "0");
-    if (strcmp(value, "1") == 0) {
-        // Camera is disabled by DevicePolicyManager.
-        ALOGE("CameraService::connect X (PID %d) rejected (camera %s is disabled by device "
-                "policy)", callingPid, cameraId.string());
-        return -EACCES;
-    }
 
     // Only allow clients who are being used by the current foreground device user, unless calling
     // from our own process.
@@ -907,10 +988,24 @@
         ALOGE("CameraService::connect X (PID %d) rejected (cannot connect from "
                 "device user %d, currently allowed device users: %s)", callingPid, clientUserId,
                 toString(mAllowedUsers).string());
-        return PERMISSION_DENIED;
+        return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+                "Callers from device user %d are not currently allowed to connect to camera \"%s\"",
+                clientUserId, cameraId.string());
     }
 
-    return checkIfDeviceIsUsable(cameraId);
+    status_t err = checkIfDeviceIsUsable(cameraId);
+    if (err != NO_ERROR) {
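+        // Fold the internal status_t into the service-specific error space sent over binder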
+        switch(err) {
+            case -ENODEV:
+            case -EBUSY:
+                return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                        "No camera device with ID \"%s\" currently available", cameraId.string());
+            default:
+                return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                        "Unknown error connecting to ID \"%s\"", cameraId.string());
+        }
+    }
+    return Status::ok();
 }
 
 status_t CameraService::checkIfDeviceIsUsable(const String8& cameraId) const {
@@ -922,7 +1017,7 @@
         return -ENODEV;
     }
 
-    ICameraServiceListener::Status currentStatus = cameraState->getStatus();
+    int32_t currentStatus = cameraState->getStatus();
     if (currentStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
         ALOGE("CameraService::connect X (PID %d) rejected (camera %s is not connected)",
                 callingPid, cameraId.string());
@@ -999,11 +1094,6 @@
             }
         }
 
-        // Return error if the device was unplugged or removed by the HAL for some reason
-        if ((ret = checkIfDeviceIsUsable(cameraId)) != NO_ERROR) {
-            return ret;
-        }
-
         // Get current active client PIDs
         std::vector<int> ownerPids(mActiveClientManager.getAllOwners());
         ownerPids.push_back(clientPid);
@@ -1030,6 +1120,7 @@
         if (state == nullptr) {
             ALOGE("CameraService::connect X (PID %d) rejected (no camera device with ID %s)",
                 clientPid, cameraId.string());
+            // Should never get here because validateConnectLocked should have errored out
             return BAD_VALUE;
         }
 
@@ -1102,7 +1193,7 @@
                     getCameraPriorityFromProcState(priorities[priorities.size() - 1])));
 
             // Notify the client of disconnection
-            clientSp->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+            clientSp->notifyError(
+                    hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
                     CaptureResultExtras());
         }
     }
@@ -1154,38 +1245,41 @@
     return NO_ERROR;
 }
 
-status_t CameraService::connect(
+Status CameraService::connect(
         const sp<ICameraClient>& cameraClient,
         int cameraId,
         const String16& clientPackageName,
         int clientUid,
+        int clientPid,
         /*out*/
-        sp<ICamera>& device) {
+        sp<ICamera>* device) {
 
     ATRACE_CALL();
-    status_t ret = NO_ERROR;
+    Status ret = Status::ok();
     String8 id = String8::format("%d", cameraId);
     sp<Client> client = nullptr;
-    ret = connectHelper<ICameraClient,Client>(cameraClient, id, CAMERA_HAL_API_VERSION_UNSPECIFIED,
-            clientPackageName, clientUid, API_1, false, false, /*out*/client);
+    ret = connectHelper<ICameraClient,Client>(cameraClient, id,
+            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, clientPid, API_1,
+            /*legacyMode*/ false, /*shimUpdateOnly*/ false,
+            /*out*/client);
 
-    if(ret != NO_ERROR) {
+    if(!ret.isOk()) {
         logRejected(id, getCallingPid(), String8(clientPackageName),
-                String8::format("%s (%d)", strerror(-ret), ret));
+                ret.toString8());
         return ret;
     }
 
-    device = client;
-    return NO_ERROR;
+    *device = client;
+    return ret;
 }
 
-status_t CameraService::connectLegacy(
+Status CameraService::connectLegacy(
         const sp<ICameraClient>& cameraClient,
         int cameraId, int halVersion,
         const String16& clientPackageName,
         int clientUid,
         /*out*/
-        sp<ICamera>& device) {
+        sp<ICamera>* device) {
 
     ATRACE_CALL();
     String8 id = String8::format("%d", cameraId);
@@ -1198,61 +1292,68 @@
          * it's a particular version in which case the HAL must support
          * the open_legacy call
          */
-        ALOGE("%s: camera HAL module version %x doesn't support connecting to legacy HAL devices!",
-                __FUNCTION__, apiVersion);
+        String8 msg = String8::format("Camera HAL module version %x too old for connectLegacy!",
+                apiVersion);
+        ALOGE("%s: %s",
+                __FUNCTION__, msg.string());
         logRejected(id, getCallingPid(), String8(clientPackageName),
-                String8("HAL module version doesn't support legacy HAL connections"));
-        return INVALID_OPERATION;
+                msg);
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
-    status_t ret = NO_ERROR;
+    Status ret = Status::ok();
     sp<Client> client = nullptr;
-    ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion, clientPackageName,
-            clientUid, API_1, true, false, /*out*/client);
+    ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion,
+            clientPackageName, clientUid, USE_CALLING_PID, API_1,
+            /*legacyMode*/ true, /*shimUpdateOnly*/ false,
+            /*out*/client);
 
-    if(ret != NO_ERROR) {
+    if(!ret.isOk()) {
         logRejected(id, getCallingPid(), String8(clientPackageName),
-                String8::format("%s (%d)", strerror(-ret), ret));
+                ret.toString8());
         return ret;
     }
 
-    device = client;
-    return NO_ERROR;
+    *device = client;
+    return ret;
 }
 
-status_t CameraService::connectDevice(
-        const sp<ICameraDeviceCallbacks>& cameraCb,
+Status CameraService::connectDevice(
+        const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
         int cameraId,
         const String16& clientPackageName,
         int clientUid,
         /*out*/
-        sp<ICameraDeviceUser>& device) {
+        sp<hardware::camera2::ICameraDeviceUser>* device) {
 
     ATRACE_CALL();
-    status_t ret = NO_ERROR;
+    Status ret = Status::ok();
     String8 id = String8::format("%d", cameraId);
     sp<CameraDeviceClient> client = nullptr;
-    ret = connectHelper<ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
-            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, API_2, false, false,
+    ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
+            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
+            clientUid, USE_CALLING_PID, API_2,
+            /*legacyMode*/ false, /*shimUpdateOnly*/ false,
             /*out*/client);
 
-    if(ret != NO_ERROR) {
+    if(!ret.isOk()) {
         logRejected(id, getCallingPid(), String8(clientPackageName),
-                String8::format("%s (%d)", strerror(-ret), ret));
+                ret.toString8());
         return ret;
     }
 
-    device = client;
-    return NO_ERROR;
+    *device = client;
+    return ret;
 }
 
-status_t CameraService::setTorchMode(const String16& cameraId, bool enabled,
+Status CameraService::setTorchMode(const String16& cameraId, bool enabled,
         const sp<IBinder>& clientBinder) {
 
     ATRACE_CALL();
     if (enabled && clientBinder == nullptr) {
         ALOGE("%s: torch client binder is NULL", __FUNCTION__);
-        return -EINVAL;
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+                "Torch client Binder is null");
     }
 
     String8 id = String8(cameraId.string());
@@ -1262,35 +1363,47 @@
     auto state = getCameraState(id);
     if (state == nullptr) {
         ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
-        return -EINVAL;
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                "Camera ID \"%s\" is a not valid camera ID", id.string());
     }
 
-    ICameraServiceListener::Status cameraStatus = state->getStatus();
+    int32_t cameraStatus = state->getStatus();
     if (cameraStatus != ICameraServiceListener::STATUS_PRESENT &&
             cameraStatus != ICameraServiceListener::STATUS_NOT_AVAILABLE) {
         ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
-        return -EINVAL;
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                "Camera ID \"%s\" is a not valid camera ID", id.string());
     }
 
     {
         Mutex::Autolock al(mTorchStatusMutex);
-        ICameraServiceListener::TorchStatus status;
-        status_t res = getTorchStatusLocked(id, &status);
-        if (res) {
+        int32_t status;
+        status_t err = getTorchStatusLocked(id, &status);
+        if (err != OK) {
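+            // getTorchStatusLocked() only tracks cameras with flash units, so
+            // NAME_NOT_FOUND here means this camera has no flash unit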
+            if (err == NAME_NOT_FOUND) {
+                return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                        "Camera \"%s\" does not have a flash unit", id.string());
+            }
             ALOGE("%s: getting current torch status failed for camera %s",
                     __FUNCTION__, id.string());
-            return -EINVAL;
+            return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                    "Error updating torch status for camera \"%s\": %s (%d)", id.string(),
+                    strerror(-err), err);
         }
 
         if (status == ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE) {
             if (cameraStatus == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
                 ALOGE("%s: torch mode of camera %s is not available because "
                         "camera is in use", __FUNCTION__, id.string());
-                return -EBUSY;
+                return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+                        "Torch for camera \"%s\" is not available due to an existing camera user",
+                        id.string());
             } else {
                 ALOGE("%s: torch mode of camera %s is not available due to "
                         "insufficient resources", __FUNCTION__, id.string());
-                return -EUSERS;
+                return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+                        "Torch for camera \"%s\" is not available due to insufficient resources",
+                        id.string());
             }
         }
     }
@@ -1308,19 +1421,31 @@
         }
     }
 
-    status_t res = mFlashlight->setTorchMode(id, enabled);
+    status_t err = mFlashlight->setTorchMode(id, enabled);
 
-    if (res) {
-        ALOGE("%s: setting torch mode of camera %s to %d failed. %s (%d)",
-                __FUNCTION__, id.string(), enabled, strerror(-res), res);
-        return res;
+    if (err != OK) {
+        int32_t errorCode;
+        String8 msg;
+        switch (err) {
+            case -ENOSYS:
+                msg = String8::format("Camera \"%s\" has no flashlight",
+                    id.string());
+                errorCode = ERROR_ILLEGAL_ARGUMENT;
+                break;
+            default:
+                msg = String8::format(
+                    "Setting torch mode of camera \"%s\" to %d failed: %s (%d)",
+                    id.string(), enabled, strerror(-err), err);
+                errorCode = ERROR_INVALID_OPERATION;
+        }
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(errorCode, msg.string());
     }
 
     {
         // update the link to client's death
         Mutex::Autolock al(mTorchClientMapMutex);
         ssize_t index = mTorchClientMap.indexOfKey(id);
-        BatteryNotifier& notifier(BatteryNotifier::getInstance());
         if (enabled) {
             if (index == NAME_NOT_FOUND) {
                 mTorchClientMap.add(id, clientBinder);
@@ -1334,34 +1459,36 @@
         }
     }
 
-    return OK;
+    return Status::ok();
 }
 
-void CameraService::notifySystemEvent(int32_t eventId, const int32_t* args, size_t length) {
+Status CameraService::notifySystemEvent(int32_t eventId,
+        const std::vector<int32_t>& args) {
     ATRACE_CALL();
 
     switch(eventId) {
-        case ICameraService::USER_SWITCHED: {
-            doUserSwitch(/*newUserIds*/args, /*length*/length);
+        case ICameraService::EVENT_USER_SWITCHED: {
+            doUserSwitch(/*newUserIds*/ args);
             break;
         }
-        case ICameraService::NO_EVENT:
+        case ICameraService::EVENT_NONE:
         default: {
             ALOGW("%s: Received invalid system event from system_server: %d", __FUNCTION__,
                     eventId);
             break;
         }
     }
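+    // Unknown events are logged above but are not reported as binder errors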
+    return Status::ok();
 }
 
-status_t CameraService::addListener(const sp<ICameraServiceListener>& listener) {
+Status CameraService::addListener(const sp<ICameraServiceListener>& listener) {
     ATRACE_CALL();
 
     ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
 
     if (listener == nullptr) {
         ALOGE("%s: Listener must not be null", __FUNCTION__);
-        return BAD_VALUE;
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Null listener given to addListener");
     }
 
     Mutex::Autolock lock(mServiceLock);
@@ -1372,7 +1499,7 @@
             if (IInterface::asBinder(it) == IInterface::asBinder(listener)) {
                 ALOGW("%s: Tried to add listener %p which was already subscribed",
                       __FUNCTION__, listener.get());
-                return ALREADY_EXISTS;
+                return STATUS_ERROR(ERROR_ALREADY_EXISTS, "Listener already registered");
             }
         }
 
@@ -1401,17 +1528,17 @@
         }
     }
 
-    return OK;
+    return Status::ok();
 }
 
-status_t CameraService::removeListener(const sp<ICameraServiceListener>& listener) {
+Status CameraService::removeListener(const sp<ICameraServiceListener>& listener) {
     ATRACE_CALL();
 
     ALOGV("%s: Remove listener %p", __FUNCTION__, listener.get());
 
     if (listener == 0) {
         ALOGE("%s: Listener must not be null", __FUNCTION__);
-        return BAD_VALUE;
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Null listener given to removeListener");
     }
 
     Mutex::Autolock lock(mServiceLock);
@@ -1421,7 +1548,7 @@
         for (auto it = mListenerList.begin(); it != mListenerList.end(); it++) {
             if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
                 mListenerList.erase(it);
-                return OK;
+                return Status::ok();
             }
         }
     }
@@ -1429,23 +1556,23 @@
     ALOGW("%s: Tried to remove a listener %p which was not subscribed",
           __FUNCTION__, listener.get());
 
-    return BAD_VALUE;
+    return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Unregistered listener given to removeListener");
 }
 
-status_t CameraService::getLegacyParameters(int cameraId, /*out*/String16* parameters) {
+Status CameraService::getLegacyParameters(int cameraId, /*out*/String16* parameters) {
 
     ATRACE_CALL();
     ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
 
     if (parameters == NULL) {
         ALOGE("%s: parameters must not be null", __FUNCTION__);
-        return BAD_VALUE;
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Parameters must not be null");
     }
 
-    status_t ret = 0;
+    Status ret = Status::ok();
 
     CameraParameters shimParams;
-    if ((ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)) != OK) {
+    if (!(ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)).isOk()) {
         // Error logged by caller
         return ret;
     }
@@ -1455,10 +1582,10 @@
 
     *parameters = shimParamsString16;
 
-    return OK;
+    return ret;
 }
 
-status_t CameraService::supportsCameraApi(int cameraId, int apiVersion) {
+Status CameraService::supportsCameraApi(int cameraId, int apiVersion, bool *isSupported) {
     ATRACE_CALL();
 
     ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
@@ -1468,42 +1595,49 @@
         case API_VERSION_2:
             break;
         default:
-            ALOGE("%s: Bad API version %d", __FUNCTION__, apiVersion);
-            return BAD_VALUE;
+            String8 msg = String8::format("Unknown API version %d", apiVersion);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
     int facing = -1;
     int deviceVersion = getDeviceVersion(cameraId, &facing);
 
     switch(deviceVersion) {
-      case CAMERA_DEVICE_API_VERSION_1_0:
-      case CAMERA_DEVICE_API_VERSION_2_0:
-      case CAMERA_DEVICE_API_VERSION_2_1:
-      case CAMERA_DEVICE_API_VERSION_3_0:
-      case CAMERA_DEVICE_API_VERSION_3_1:
-        if (apiVersion == API_VERSION_2) {
-            ALOGV("%s: Camera id %d uses HAL prior to HAL3.2, doesn't support api2 without shim",
+        case CAMERA_DEVICE_API_VERSION_1_0:
+        case CAMERA_DEVICE_API_VERSION_3_0:
+        case CAMERA_DEVICE_API_VERSION_3_1:
+            if (apiVersion == API_VERSION_2) {
+                ALOGV("%s: Camera id %d uses HAL version %d <3.2, doesn't support api2 without shim",
+                        __FUNCTION__, cameraId, deviceVersion);
+                *isSupported = false;
+            } else { // if (apiVersion == API_VERSION_1) {
+                ALOGV("%s: Camera id %d uses older HAL before 3.2, but api1 is always supported",
+                        __FUNCTION__, cameraId);
+                *isSupported = true;
+            }
+            break;
+        case CAMERA_DEVICE_API_VERSION_3_2:
+        case CAMERA_DEVICE_API_VERSION_3_3:
+        case CAMERA_DEVICE_API_VERSION_3_4:
+            ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly",
                     __FUNCTION__, cameraId);
-            return -EOPNOTSUPP;
-        } else { // if (apiVersion == API_VERSION_1) {
-            ALOGV("%s: Camera id %d uses older HAL before 3.2, but api1 is always supported",
-                    __FUNCTION__, cameraId);
-            return OK;
+            *isSupported = true;
+            break;
+        case -1: {
+            String8 msg = String8::format("Unknown camera ID %d", cameraId);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
         }
-      case CAMERA_DEVICE_API_VERSION_3_2:
-      case CAMERA_DEVICE_API_VERSION_3_3:
-        ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly",
-                __FUNCTION__, cameraId);
-        return OK;
-      case -1:
-        ALOGE("%s: Invalid camera id %d", __FUNCTION__, cameraId);
-        return BAD_VALUE;
-      default:
-        ALOGE("%s: Unknown camera device HAL version: %d", __FUNCTION__, deviceVersion);
-        return INVALID_OPERATION;
+        default: {
+            String8 msg = String8::format("Unknown device version %d for device %d",
+                    deviceVersion, cameraId);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(ERROR_INVALID_OPERATION, msg.string());
+        }
     }
 
-    return OK;
+    return Status::ok();
 }
 
 void CameraService::removeByClient(const BasicClient* client) {
@@ -1540,7 +1674,8 @@
                 evicted.push_back(clientSp);
 
                 // Notify the client of disconnection
-                clientSp->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                clientSp->notifyError(
+                        hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
                         CaptureResultExtras());
             }
         }
@@ -1569,8 +1704,35 @@
 
 /**
  * Check camera capabilities, such as support for basic color operation
+ * Also check that the device HAL version is still supported
  */
 int CameraService::checkCameraCapabilities(int id, camera_info info, int *latestStrangeCameraId) {
+    // device_version is undefined for CAMERA_MODULE_API_VERSION_1_0;
+    // all CAMERA_MODULE_API_VERSION_1_0 devices are backward-compatible
+    if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0) {
+        // Verify the device version is in the supported range
+        switch (info.device_version) {
+            case CAMERA_DEVICE_API_VERSION_1_0:
+            case CAMERA_DEVICE_API_VERSION_3_0:
+            case CAMERA_DEVICE_API_VERSION_3_1:
+            case CAMERA_DEVICE_API_VERSION_3_2:
+            case CAMERA_DEVICE_API_VERSION_3_3:
+            case CAMERA_DEVICE_API_VERSION_3_4:
+                // still supported
+                break;
+            case CAMERA_DEVICE_API_VERSION_2_0:
+            case CAMERA_DEVICE_API_VERSION_2_1:
+                // no longer supported
+            default:
+                ALOGE("%s: Device %d has HAL version %x, which is not supported",
+                        __FUNCTION__, id, info.device_version);
+                String8 msg = String8::format(
+                        "Unsupported device HAL version %x for device %d",
+                        info.device_version, id);
+                logServiceError(msg.string(), NO_INIT);
+                return NO_INIT;
+        }
+    }
 
     // Assume all devices pre-v3.3 are backward-compatible
     bool isBackwardCompatible = true;
@@ -1605,10 +1767,10 @@
             ALOGE("%s: Normal camera ID %d higher than strange camera ID %d. "
                     "This is not allowed due backward-compatibility requirements",
                     __FUNCTION__, id, *latestStrangeCameraId);
-            logServiceError("Invalid order of camera devices", ENODEV);
+            logServiceError("Invalid order of camera devices", NO_INIT);
             mNumberOfCameras = 0;
             mNumberOfNormalCameras = 0;
-            return INVALID_OPERATION;
+            return NO_INIT;
         }
     }
     return OK;
@@ -1639,19 +1801,19 @@
     return clientDescriptorPtr->getValue();
 }
 
-void CameraService::doUserSwitch(const int32_t* newUserId, size_t length) {
+void CameraService::doUserSwitch(const std::vector<int32_t>& newUserIds) {
     // Acquire mServiceLock and prevent other clients from connecting
     std::unique_ptr<AutoConditionLock> lock =
             AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
 
     std::set<userid_t> newAllowedUsers;
-    for (size_t i = 0; i < length; i++) {
-        if (newUserId[i] < 0) {
+    for (size_t i = 0; i < newUserIds.size(); i++) {
+        if (newUserIds[i] < 0) {
             ALOGE("%s: Bad user ID %d given during user switch, ignoring.",
-                    __FUNCTION__, newUserId[i]);
+                    __FUNCTION__, newUserIds[i]);
             return;
         }
-        newAllowedUsers.insert(static_cast<userid_t>(newUserId[i]));
+        newAllowedUsers.insert(static_cast<userid_t>(newUserIds[i]));
     }
 
 
@@ -1766,7 +1928,7 @@
 
 void CameraService::logServiceError(const char* msg, int errorCode) {
     String8 curTime = getFormattedCurrentTime();
-    logEvent(String8::format("SERVICE ERROR: %s : %d (%s)", msg, errorCode, strerror(errorCode)));
+    logEvent(String8::format("SERVICE ERROR: %s : %d (%s)", msg, errorCode, strerror(-errorCode)));
 }
 
 status_t CameraService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
@@ -1777,22 +1939,7 @@
 
     // Permission checks
     switch (code) {
-        case BnCameraService::CONNECT:
-        case BnCameraService::CONNECT_DEVICE:
-        case BnCameraService::CONNECT_LEGACY: {
-            if (pid != selfPid) {
-                // we're called from a different process, do the real check
-                if (!checkCallingPermission(
-                        String16("android.permission.CAMERA"))) {
-                    const int uid = getCallingUid();
-                    ALOGE("Permission Denial: "
-                         "can't use the camera pid=%d, uid=%d", pid, uid);
-                    return PERMISSION_DENIED;
-                }
-            }
-            break;
-        }
-        case BnCameraService::NOTIFY_SYSTEM_EVENT: {
+        case BnCameraService::NOTIFYSYSTEMEVENT: {
             if (pid != selfPid) {
                 // Ensure we're being called by system_server, or similar process with
                 // permissions to notify the camera service about system events
@@ -1916,6 +2063,37 @@
     mServicePid = servicePid;
     mOpsActive = false;
     mDestructionStarted = false;
+
+    // In some cases the calling code has no access to the package name it runs under;
+    // the NDK camera API is one example.
+    // In that case, query the packages for the calling UID and pick the first one
+    // for attributing the app op. This works correctly for runtime permissions,
+    // since for legacy apps the app op is toggled for all packages in the UID.
+    // The caveat is that the operation may be attributed to the wrong package,
+    // so stats based on app ops may be slightly off.
+    if (mClientPackageName.size() <= 0) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder = sm->getService(String16(kPermissionServiceName));
+        if (binder == 0) {
+            ALOGE("Cannot get permission service");
+            // Leave mClientPackageName unchanged (empty); any further interaction
+            // with the camera will fail in BasicClient::startCameraOps
+            return;
+        }
+
+        sp<IPermissionController> permCtrl = interface_cast<IPermissionController>(binder);
+        Vector<String16> packages;
+
+        permCtrl->getPackagesForUid(mClientUid, packages);
+
+        if (packages.isEmpty()) {
+            ALOGE("No packages for calling UID");
+            // Leave mClientPackageName unchanged (empty); any further interaction
+            // with the camera will fail in BasicClient::startCameraOps
+            return;
+        }
+        mClientPackageName = packages[0];
+    }
 }
 
 CameraService::BasicClient::~BasicClient() {
@@ -1923,9 +2101,10 @@
     mDestructionStarted = true;
 }
 
-void CameraService::BasicClient::disconnect() {
+binder::Status CameraService::BasicClient::disconnect() {
+    binder::Status res = Status::ok();
     if (mDisconnected) {
-        return;
+        return res;
     }
     mDisconnected = true;
 
@@ -1943,6 +2122,8 @@
 
     // client shouldn't be able to call into us anymore
     mClientPid = 0;
+
+    return res;
 }
 
 status_t CameraService::BasicClient::dump(int, const Vector<String16>&) {
@@ -2024,7 +2205,7 @@
                 mClientPackageName);
         mOpsActive = false;
 
-        auto rejected = {ICameraServiceListener::STATUS_NOT_PRESENT,
+        std::initializer_list<int32_t> rejected = {ICameraServiceListener::STATUS_NOT_PRESENT,
                 ICameraServiceListener::STATUS_ENUMERATING};
 
         // Transition to PRESENT if the camera is not in either of the rejected states
@@ -2075,7 +2256,7 @@
         // and to prevent further calls by client.
         mClientPid = getCallingPid();
         CaptureResultExtras resultExtras; // a dummy result (invalid)
-        notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE, resultExtras);
+        notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE, resultExtras);
         disconnect();
     }
 }
@@ -2093,8 +2274,10 @@
     return sp<Client>{nullptr};
 }
 
-void CameraService::Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+void CameraService::Client::notifyError(int32_t errorCode,
         const CaptureResultExtras& resultExtras) {
+    (void) errorCode;
+    (void) resultExtras;
     if (mRemoteCallback != NULL) {
         mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
     } else {
@@ -2103,9 +2286,9 @@
 }
 
 // NOTE: function is idempotent
-void CameraService::Client::disconnect() {
+binder::Status CameraService::Client::disconnect() {
     ALOGV("Client::disconnect");
-    BasicClient::disconnect();
+    return BasicClient::disconnect();
 }
 
 bool CameraService::Client::canCastToApiClient(apiLevel level) const {
@@ -2134,7 +2317,7 @@
 
 CameraService::CameraState::~CameraState() {}
 
-ICameraServiceListener::Status CameraService::CameraState::getStatus() const {
+int32_t CameraService::CameraState::getStatus() const {
     Mutex::Autolock lock(mStatusLock);
     return mStatus;
 }
@@ -2312,6 +2495,7 @@
         result.appendFormat("Camera module name: %s\n", mModule->getModuleName());
         result.appendFormat("Camera module author: %s\n", mModule->getModuleAuthor());
         result.appendFormat("Number of camera devices: %d\n", mNumberOfCameras);
+        result.appendFormat("Number of normal camera devices: %d\n", mNumberOfNormalCameras);
         String8 activeClientString = mActiveClientManager.toString();
         result.appendFormat("Active Camera Clients:\n%s", activeClientString.string());
         result.appendFormat("Allowed users:\n%s\n", toString(mAllowedUsers).string());
@@ -2362,7 +2546,7 @@
                 result.appendFormat("  Resource Cost: %d\n", state.second->getCost());
                 result.appendFormat("  Conflicting Devices:");
                 for (auto& id : conflicting) {
-                    result.appendFormat(" %s", cameraId.string());
+                    result.appendFormat(" %s", id.string());
                 }
                 if (conflicting.size() == 0) {
                     result.appendFormat(" NONE");
@@ -2370,7 +2554,7 @@
                 result.appendFormat("\n");
 
                 result.appendFormat("  Device version: %#x\n", deviceVersion);
-                if (deviceVersion >= CAMERA_DEVICE_API_VERSION_2_0) {
+                if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
                     result.appendFormat("  Device static metadata:\n");
                     write(fd, result.string(), result.size());
                     dump_indented_camera_metadata(info.static_camera_characteristics,
@@ -2421,16 +2605,32 @@
         write(fd, "\n", 1);
         camera3::CameraTraces::dump(fd, args);
 
-        // change logging level
+        // Process dump arguments, if any
         int n = args.size();
-        for (int i = 0; i + 1 < n; i++) {
-            String16 verboseOption("-v");
+        String16 verboseOption("-v");
+        String16 unreachableOption("--unreachable");
+        for (int i = 0; i < n; i++) {
             if (args[i] == verboseOption) {
+                // change logging level
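+                // "-v" expects a numeric log level as the next argument; ignore a trailing "-v"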
+                if (i + 1 >= n) continue;
                 String8 levelStr(args[i+1]);
                 int level = atoi(levelStr.string());
                 result = String8::format("\nSetting log level to %d.\n", level);
                 setLogLevel(level);
                 write(fd, result.string(), result.size());
+            } else if (args[i] == unreachableOption) {
+                // Dump memory analysis
+                // TODO - should limit be an argument parameter?
+                UnreachableMemoryInfo info;
+                bool success = GetUnreachableMemory(info, /*limit*/ 10000);
+                if (!success) {
+                    dprintf(fd, "\nUnable to dump unreachable memory. "
+                            "Try disabling SELinux enforcement.\n");
+                } else {
+                    dprintf(fd, "\nDumping unreachable memory:\n");
+                    std::string s = info.ToString(/*log_contents*/ true);
+                    write(fd, s.c_str(), s.size());
+                }
             }
         }
     }
@@ -2495,12 +2695,12 @@
             __FUNCTION__);
 }
 
-void CameraService::updateStatus(ICameraServiceListener::Status status, const String8& cameraId) {
+void CameraService::updateStatus(int32_t status, const String8& cameraId) {
     updateStatus(status, cameraId, {});
 }
 
-void CameraService::updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
-        std::initializer_list<ICameraServiceListener::Status> rejectSourceStates) {
+void CameraService::updateStatus(int32_t status, const String8& cameraId,
+        std::initializer_list<int32_t> rejectSourceStates) {
     // Do not lock mServiceLock here or can get into a deadlock from
     // connect() -> disconnect -> updateStatus
 
@@ -2515,15 +2715,15 @@
     // Update the status for this camera state, then send the onStatusChangedCallbacks to each
     // of the listeners with both the mStatusStatus and mStatusListenerLock held
     state->updateStatus(status, cameraId, rejectSourceStates, [this]
-            (const String8& cameraId, ICameraServiceListener::Status status) {
+            (const String8& cameraId, int32_t status) {
 
             if (status != ICameraServiceListener::STATUS_ENUMERATING) {
                 // Update torch status if it has a flash unit.
                 Mutex::Autolock al(mTorchStatusMutex);
-                ICameraServiceListener::TorchStatus torchStatus;
+                int32_t torchStatus;
                 if (getTorchStatusLocked(cameraId, &torchStatus) !=
                         NAME_NOT_FOUND) {
-                    ICameraServiceListener::TorchStatus newTorchStatus =
+                    int32_t newTorchStatus =
                             status == ICameraServiceListener::STATUS_PRESENT ?
                             ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF :
                             ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
@@ -2553,7 +2753,7 @@
 
 status_t CameraService::getTorchStatusLocked(
         const String8& cameraId,
-        ICameraServiceListener::TorchStatus *status) const {
+        int32_t *status) const {
     if (!status) {
         return BAD_VALUE;
     }
@@ -2568,12 +2768,12 @@
 }
 
 status_t CameraService::setTorchStatusLocked(const String8& cameraId,
-        ICameraServiceListener::TorchStatus status) {
+        int32_t status) {
     ssize_t index = mTorchStatusMap.indexOfKey(cameraId);
     if (index == NAME_NOT_FOUND) {
         return BAD_VALUE;
     }
-    ICameraServiceListener::TorchStatus& item =
+    int32_t& item =
             mTorchStatusMap.editValueAt(index);
     item = status;
 
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index d2c1bd3..11b1351 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -17,25 +17,22 @@
 #ifndef ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
 #define ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
 
+#include <android/hardware/BnCameraService.h>
+#include <android/hardware/ICameraServiceListener.h>
+
 #include <cutils/multiuser.h>
 #include <utils/Vector.h>
 #include <utils/KeyedVector.h>
 #include <binder/AppOpsManager.h>
 #include <binder/BinderService.h>
 #include <binder/IAppOpsCallback.h>
-#include <camera/ICameraService.h>
 #include <camera/ICameraServiceProxy.h>
 #include <hardware/camera.h>
 
-#include <camera/ICamera.h>
-#include <camera/ICameraClient.h>
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
 #include <camera/VendorTagDescriptor.h>
 #include <camera/CaptureResult.h>
 #include <camera/CameraParameters.h>
 
-#include <camera/ICameraServiceListener.h>
 #include "CameraFlashlight.h"
 
 #include "common/CameraModule.h"
@@ -58,7 +55,7 @@
 
 class CameraService :
     public BinderService<CameraService>,
-    public BnCameraService,
+    public ::android::hardware::BnCameraService,
     public IBinder::DeathRecipient,
     public camera_module_callbacks_t
 {
@@ -101,55 +98,58 @@
     virtual void        onDeviceStatusChanged(camera_device_status_t cameraId,
                                               camera_device_status_t newStatus);
     virtual void        onTorchStatusChanged(const String8& cameraId,
-                                             ICameraServiceListener::TorchStatus
-                                                   newStatus);
+                                             int32_t newStatus);
 
     /////////////////////////////////////////////////////////////////////
     // ICameraService
-    virtual int32_t     getNumberOfCameras(int type);
-    virtual int32_t     getNumberOfCameras();
+    virtual binder::Status     getNumberOfCameras(int32_t type, int32_t* numCameras);
 
-    virtual status_t    getCameraInfo(int cameraId,
-                                      struct CameraInfo* cameraInfo);
-    virtual status_t    getCameraCharacteristics(int cameraId,
-                                                 CameraMetadata* cameraInfo);
-    virtual status_t    getCameraVendorTagDescriptor(/*out*/ sp<VendorTagDescriptor>& desc);
-
-    virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
-            const String16& clientPackageName, int clientUid,
+    virtual binder::Status     getCameraInfo(int cameraId,
+            hardware::CameraInfo* cameraInfo);
+    virtual binder::Status     getCameraCharacteristics(int cameraId,
+            CameraMetadata* cameraInfo);
+    virtual binder::Status     getCameraVendorTagDescriptor(
             /*out*/
-            sp<ICamera>& device);
+            hardware::camera2::params::VendorTagDescriptor* desc);
 
-    virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId,
-            int halVersion, const String16& clientPackageName, int clientUid,
+    virtual binder::Status     connect(const sp<hardware::ICameraClient>& cameraClient,
+            int32_t cameraId, const String16& clientPackageName,
+            int32_t clientUid, int clientPid,
             /*out*/
-            sp<ICamera>& device);
+            sp<hardware::ICamera>* device);
 
-    virtual status_t connectDevice(
-            const sp<ICameraDeviceCallbacks>& cameraCb,
-            int cameraId,
-            const String16& clientPackageName,
-            int clientUid,
+    virtual binder::Status     connectLegacy(const sp<hardware::ICameraClient>& cameraClient,
+            int32_t cameraId, int32_t halVersion,
+            const String16& clientPackageName, int32_t clientUid,
             /*out*/
-            sp<ICameraDeviceUser>& device);
+            sp<hardware::ICamera>* device);
 
-    virtual status_t    addListener(const sp<ICameraServiceListener>& listener);
-    virtual status_t    removeListener(
-                                    const sp<ICameraServiceListener>& listener);
+    virtual binder::Status     connectDevice(
+            const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb, int32_t cameraId,
+            const String16& clientPackageName, int32_t clientUid,
+            /*out*/
+            sp<hardware::camera2::ICameraDeviceUser>* device);
 
-    virtual status_t    getLegacyParameters(
-            int cameraId,
+    virtual binder::Status    addListener(const sp<hardware::ICameraServiceListener>& listener);
+    virtual binder::Status    removeListener(
+            const sp<hardware::ICameraServiceListener>& listener);
+
+    virtual binder::Status    getLegacyParameters(
+            int32_t cameraId,
             /*out*/
             String16* parameters);
 
-    virtual status_t    setTorchMode(const String16& cameraId, bool enabled,
+    virtual binder::Status    setTorchMode(const String16& cameraId, bool enabled,
             const sp<IBinder>& clientBinder);
 
-    virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t length);
+    virtual binder::Status    notifySystemEvent(int32_t eventId,
+            const std::vector<int32_t>& args);
 
-    // OK = supports api of that version, -EOPNOTSUPP = does not support
+    // Sets *isSupported to true if the given camera supports the given API version
-    virtual status_t    supportsCameraApi(
-            int cameraId, int apiVersion);
+    virtual binder::Status    supportsCameraApi(
+            int32_t cameraId, int32_t apiVersion,
+            /*out*/
+            bool *isSupported);
 
     // Extra permissions checks
     virtual status_t    onTransact(uint32_t code, const Parcel& data,
@@ -185,35 +185,35 @@
 
     /////////////////////////////////////////////////////////////////////
     // Shared utilities
-    static status_t     filterGetInfoErrorCode(status_t err);
+    static binder::Status filterGetInfoErrorCode(status_t err);
 
     /////////////////////////////////////////////////////////////////////
     // CameraClient functionality
 
     class BasicClient : public virtual RefBase {
     public:
-        virtual status_t    initialize(CameraModule *module) = 0;
-        virtual void        disconnect();
+        virtual status_t       initialize(CameraModule *module) = 0;
+        virtual binder::Status disconnect();
 
         // because we can't virtually inherit IInterface, which breaks
         // virtual inheritance
-        virtual sp<IBinder> asBinderWrapper() = 0;
+        virtual sp<IBinder>    asBinderWrapper() = 0;
 
         // Return the remote callback binder object (e.g. ICameraDeviceCallbacks)
-        sp<IBinder>         getRemote() {
+        sp<IBinder>            getRemote() {
             return mRemoteBinder;
         }
 
         // Disallows dumping over binder interface
-        virtual status_t      dump(int fd, const Vector<String16>& args);
+        virtual status_t dump(int fd, const Vector<String16>& args);
         // Internal dump method to be called by CameraService
-        virtual status_t      dumpClient(int fd, const Vector<String16>& args) = 0;
+        virtual status_t dumpClient(int fd, const Vector<String16>& args) = 0;
 
         // Return the package name for this client
         virtual String16 getPackageName() const;
 
         // Notify client about a fatal error
-        virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+        virtual void notifyError(int32_t errorCode,
                 const CaptureResultExtras& resultExtras) = 0;
 
         // Get the UID of the application client using this
@@ -244,13 +244,13 @@
         bool                            mDestructionStarted;
 
         // these are initialized in the constructor.
-        sp<CameraService>               mCameraService;  // immutable after constructor
-        int                             mCameraId;       // immutable after constructor
-        int                             mCameraFacing;   // immutable after constructor
-        const String16                  mClientPackageName;
+        sp<CameraService>               mCameraService;     // immutable after constructor
+        int                             mCameraId;          // immutable after constructor
+        int                             mCameraFacing;      // immutable after constructor
+        String16                        mClientPackageName; // immutable after constructor
         pid_t                           mClientPid;
-        uid_t                           mClientUid;      // immutable after constructor
-        pid_t                           mServicePid;     // immutable after constructor
+        uid_t                           mClientUid;         // immutable after constructor
+        pid_t                           mServicePid;        // immutable after constructor
         bool                            mDisconnected;
 
         // - The app-side Binder interface to receive callbacks from us
@@ -282,14 +282,14 @@
         virtual void opChanged(int32_t op, const String16& packageName);
     }; // class BasicClient
 
-    class Client : public BnCamera, public BasicClient
+    class Client : public hardware::BnCamera, public BasicClient
     {
     public:
-        typedef ICameraClient TCamCallbacks;
+        typedef hardware::ICameraClient TCamCallbacks;
 
         // ICamera interface (see ICamera for details)
-        virtual void          disconnect();
-        virtual status_t      connect(const sp<ICameraClient>& client) = 0;
+        virtual binder::Status disconnect();
+        virtual status_t      connect(const sp<hardware::ICameraClient>& client) = 0;
         virtual status_t      lock() = 0;
         virtual status_t      unlock() = 0;
         virtual status_t      setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer)=0;
@@ -299,7 +299,7 @@
         virtual status_t      startPreview() = 0;
         virtual void          stopPreview() = 0;
         virtual bool          previewEnabled() = 0;
-        virtual status_t      storeMetaDataInBuffers(bool enabled) = 0;
+        virtual status_t      setVideoBufferMode(int32_t videoBufferMode) = 0;
         virtual status_t      startRecording() = 0;
         virtual void          stopRecording() = 0;
         virtual bool          recordingEnabled() = 0;
@@ -310,10 +310,11 @@
         virtual status_t      setParameters(const String8& params) = 0;
         virtual String8       getParameters() const = 0;
         virtual status_t      sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) = 0;
+        virtual status_t      setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer) = 0;
 
         // Interface used by CameraService
         Client(const sp<CameraService>& cameraService,
-                const sp<ICameraClient>& cameraClient,
+                const sp<hardware::ICameraClient>& cameraClient,
                 const String16& clientPackageName,
                 int cameraId,
                 int cameraFacing,
@@ -323,7 +324,7 @@
         ~Client();
 
         // return our camera client
-        const sp<ICameraClient>&    getRemoteCallback() {
+        const sp<hardware::ICameraClient>&    getRemoteCallback() {
             return mRemoteCallback;
         }
 
@@ -331,7 +332,7 @@
             return asBinder(this);
         }
 
-        virtual void         notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+        virtual void         notifyError(int32_t errorCode,
                                          const CaptureResultExtras& resultExtras);
 
         // Check what API level is used for this client. This is used to determine which
@@ -344,7 +345,7 @@
         // Initialized in constructor
 
         // - The app-side Binder interface to receive callbacks from us
-        sp<ICameraClient>               mRemoteCallback;
+        sp<hardware::ICameraClient>               mRemoteCallback;
 
     }; // class Client
 
@@ -431,12 +432,12 @@
          *
          * This method acquires mStatusLock.
          */
-        ICameraServiceListener::Status getStatus() const;
+        int32_t getStatus() const;
 
         /**
          * This function updates the status for this camera device, unless the given status
          * is in the given list of rejected status states, and execute the function passed in
-         * with a signature onStatusUpdateLocked(const String8&, ICameraServiceListener::Status)
+         * with a signature onStatusUpdateLocked(const String8&, int32_t)
          * if the status has changed.
          *
          * This method is idempotent, and will not result in the function passed to
@@ -444,8 +445,8 @@
          * This method acquires mStatusLock.
          */
         template<class Func>
-        void updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
-                std::initializer_list<ICameraServiceListener::Status> rejectSourceStates,
+        void updateStatus(int32_t status, const String8& cameraId,
+                std::initializer_list<int32_t> rejectSourceStates,
                 Func onStatusUpdatedLocked);
 
         /**
@@ -476,7 +477,7 @@
 
     private:
         const String8 mId;
-        ICameraServiceListener::Status mStatus; // protected by mStatusLock
+        int32_t mStatus; // protected by mStatusLock
         const int mCost;
         std::set<String8> mConflicting;
         mutable Mutex mStatusLock;
@@ -487,7 +488,14 @@
     virtual void onFirstRef();
 
     // Check if we can connect, before we acquire the service lock.
-    status_t validateConnectLocked(const String8& cameraId, /*inout*/int& clientUid) const;
+    // The returned originalClientPid is the PID of the original process that wants to connect to
+    // the camera.
+    // The returned clientPid is the PID of the client that directly connects to the camera.
+    // originalClientPid and clientPid are usually the same, except when the application uses
+    // mediaserver to connect to the camera (e.g. via MediaRecorder). In that case, clientPid is
+    // the PID of mediaserver and originalClientPid is the PID of the application.
+    binder::Status validateConnectLocked(const String8& cameraId, const String8& clientName8,
+          /*inout*/int& clientUid, /*inout*/int& clientPid, /*out*/int& originalClientPid) const;
 
     // Handle active client evictions, and update service state.
     // Only call with with mServiceLock held.
@@ -499,9 +507,11 @@
 
     // Single implementation shared between the various connect calls
     template<class CALLBACK, class CLIENT>
-    status_t connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId, int halVersion,
-            const String16& clientPackageName, int clientUid, apiLevel effectiveApiLevel,
-            bool legacyMode, bool shimUpdateOnly, /*out*/sp<CLIENT>& device);
+    binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
+            int halVersion, const String16& clientPackageName,
+            int clientUid, int clientPid,
+            apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+            /*out*/sp<CLIENT>& device);
 
     // Lock guarding camera service state
     Mutex               mServiceLock;
@@ -580,7 +590,7 @@
     /**
      * Handle a notification that the current device user has changed.
      */
-    void doUserSwitch(const int32_t* newUserId, size_t length);
+    void doUserSwitch(const std::vector<int32_t>& newUserIds);
 
     /**
      * Add an event log message.
@@ -626,6 +636,7 @@
 
     /**
      * Add an event log message that a serious service-level error has occurred
+     * The errorCode should be one of the Android Errors
      */
     void logServiceError(const char* msg, int errorCode);
 
@@ -647,7 +658,7 @@
     CameraModule*     mModule;
 
     // Guarded by mStatusListenerMutex
-    std::vector<sp<ICameraServiceListener>> mListenerList;
+    std::vector<sp<hardware::ICameraServiceListener>> mListenerList;
     Mutex       mStatusListenerLock;
 
     /**
@@ -658,9 +669,9 @@
      * This method must be idempotent.
      * This method acquires mStatusLock and mStatusListenerLock.
      */
-    void updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
-            std::initializer_list<ICameraServiceListener::Status> rejectedSourceStates);
-    void updateStatus(ICameraServiceListener::Status status, const String8& cameraId);
+    void updateStatus(int32_t status, const String8& cameraId,
+            std::initializer_list<int32_t> rejectedSourceStates);
+    void updateStatus(int32_t status, const String8& cameraId);
 
     // flashlight control
     sp<CameraFlashlight> mFlashlight;
@@ -671,7 +682,7 @@
     // guard mTorchUidMap
     Mutex                mTorchUidMapMutex;
     // camera id -> torch status
-    KeyedVector<String8, ICameraServiceListener::TorchStatus> mTorchStatusMap;
+    KeyedVector<String8, int32_t> mTorchStatusMap;
     // camera id -> torch client binder
     // only store the last client that turns on each camera's torch mode
     KeyedVector<String8, sp<IBinder>> mTorchClientMap;
@@ -684,15 +695,15 @@
     // handle torch mode status change and invoke callbacks. mTorchStatusMutex
     // should be locked.
     void onTorchStatusChangedLocked(const String8& cameraId,
-            ICameraServiceListener::TorchStatus newStatus);
+            int32_t newStatus);
 
     // get a camera's torch status. mTorchStatusMutex should be locked.
     status_t getTorchStatusLocked(const String8 &cameraId,
-            ICameraServiceListener::TorchStatus *status) const;
+            int32_t *status) const;
 
     // set a camera's torch status. mTorchStatusMutex should be locked.
     status_t setTorchStatusLocked(const String8 &cameraId,
-            ICameraServiceListener::TorchStatus status);
+            int32_t status);
 
     // IBinder::DeathRecipient implementation
     virtual void        binderDied(const wp<IBinder> &who);
@@ -704,25 +715,25 @@
     /**
      * Initialize and cache the metadata used by the HAL1 shim for a given cameraId.
      *
-     * Returns OK on success, or a negative error code.
+     * Returns a Status with a service-specific error on failure
      */
-    status_t            initializeShimMetadata(int cameraId);
+    binder::Status      initializeShimMetadata(int cameraId);
 
     /**
      * Get the cached CameraParameters for the camera. If they haven't been
      * cached yet, then initialize them for the first time.
      *
-     * Returns OK on success, or a negative error code.
+     * Returns a Status with a service-specific error on failure
      */
-    status_t            getLegacyParametersLazy(int cameraId, /*out*/CameraParameters* parameters);
+    binder::Status      getLegacyParametersLazy(int cameraId, /*out*/CameraParameters* parameters);
 
     /**
      * Generate the CameraCharacteristics metadata required by the Camera2 API
      * from the available HAL1 CameraParameters and CameraInfo.
      *
-     * Returns OK on success, or a negative error code.
+     * Returns a Status with a service-specific error on failure
      */
-    status_t            generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo);
+    binder::Status      generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo);
 
     static int getCallingPid();
 
@@ -738,8 +749,8 @@
      */
     static int getCameraPriorityFromProcState(int procState);
 
-    static status_t makeClient(const sp<CameraService>& cameraService,
-            const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
+    static binder::Status makeClient(const sp<CameraService>& cameraService,
+            const sp<IInterface>& cameraCb, const String16& packageName, int cameraId,
             int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
             int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
             /*out*/sp<BasicClient>* client);
@@ -754,12 +765,12 @@
 };
 
 template<class Func>
-void CameraService::CameraState::updateStatus(ICameraServiceListener::Status status,
+void CameraService::CameraState::updateStatus(int32_t status,
         const String8& cameraId,
-        std::initializer_list<ICameraServiceListener::Status> rejectSourceStates,
+        std::initializer_list<int32_t> rejectSourceStates,
         Func onStatusUpdatedLocked) {
     Mutex::Autolock lock(mStatusLock);
-    ICameraServiceListener::Status oldStatus = mStatus;
+    int32_t oldStatus = mStatus;
     mStatus = status;
 
     if (oldStatus == status) {
@@ -769,9 +780,9 @@
     ALOGV("%s: Status has changed for camera ID %s from %#x to %#x", __FUNCTION__,
             cameraId.string(), oldStatus, status);
 
-    if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT &&
-        (status != ICameraServiceListener::STATUS_PRESENT &&
-         status != ICameraServiceListener::STATUS_ENUMERATING)) {
+    if (oldStatus == hardware::ICameraServiceListener::STATUS_NOT_PRESENT &&
+            (status != hardware::ICameraServiceListener::STATUS_PRESENT &&
+             status != hardware::ICameraServiceListener::STATUS_ENUMERATING)) {
 
         ALOGW("%s: From NOT_PRESENT can only transition into PRESENT or ENUMERATING",
                 __FUNCTION__);
@@ -796,15 +807,25 @@
     onStatusUpdatedLocked(cameraId, status);
 }
 
+#define STATUS_ERROR(errorCode, errorString) \
+    binder::Status::fromServiceSpecificError(errorCode, \
+            String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
+
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+    binder::Status::fromServiceSpecificError(errorCode, \
+            String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, __VA_ARGS__))
+
 
 template<class CALLBACK, class CLIENT>
-status_t CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
-        int halVersion, const String16& clientPackageName, int clientUid,
+binder::Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
+        int halVersion, const String16& clientPackageName, int clientUid, int clientPid,
         apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
         /*out*/sp<CLIENT>& device) {
-    status_t ret = NO_ERROR;
+    binder::Status ret = binder::Status::ok();
+
     String8 clientName8(clientPackageName);
-    int clientPid = getCallingPid();
+
+    int originalClientPid = 0;
 
     ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
             "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
@@ -818,13 +839,16 @@
                 AutoConditionLock::waitAndAcquire(mServiceLockWrapper, DEFAULT_CONNECT_TIMEOUT_NS);
 
         if (lock == nullptr) {
-            ALOGE("CameraService::connect X (PID %d) rejected (too many other clients connecting)."
+            ALOGE("CameraService::connect (PID %d) rejected (too many other clients connecting)."
                     , clientPid);
-            return -EBUSY;
+            return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+                    "Cannot open camera %s for \"%s\" (PID %d): Too many other clients connecting",
+                    cameraId.string(), clientName8.string(), clientPid);
         }
 
         // Enforce client permissions and do basic sanity checks
-        if((ret = validateConnectLocked(cameraId, /*inout*/clientUid)) != NO_ERROR) {
+        if(!(ret = validateConnectLocked(cameraId, clientName8,
+                /*inout*/clientUid, /*inout*/clientPid, /*out*/originalClientPid)).isOk()) {
             return ret;
         }
 
@@ -833,22 +857,37 @@
         if (shimUpdateOnly) {
             auto cameraState = getCameraState(cameraId);
             if (cameraState != nullptr) {
-                if (!cameraState->getShimParams().isEmpty()) return NO_ERROR;
+                if (!cameraState->getShimParams().isEmpty()) return ret;
             }
         }
 
+        status_t err;
+
         sp<BasicClient> clientTmp = nullptr;
         std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>> partial;
-        if ((ret = handleEvictionsLocked(cameraId, clientPid, effectiveApiLevel,
+        if ((err = handleEvictionsLocked(cameraId, originalClientPid, effectiveApiLevel,
                 IInterface::asBinder(cameraCb), clientName8, /*out*/&clientTmp,
                 /*out*/&partial)) != NO_ERROR) {
-            return ret;
+            switch (err) {
+                case -ENODEV:
+                    return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                            "No camera device with ID \"%s\" currently available",
+                            cameraId.string());
+                case -EBUSY:
+                    return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+                            "Higher-priority client using camera, ID \"%s\" currently unavailable",
+                            cameraId.string());
+                default:
+                    return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                            "Unexpected error %s (%d) opening camera \"%s\"",
+                            strerror(-err), err, cameraId.string());
+            }
         }
 
         if (clientTmp.get() != nullptr) {
             // Handle special case for API1 MediaRecorder where the existing client is returned
             device = static_cast<CLIENT*>(clientTmp.get());
-            return NO_ERROR;
+            return ret;
         }
 
         // give flashlight a chance to close devices if necessary.
@@ -859,15 +898,16 @@
         if (id == -1) {
             ALOGE("%s: Invalid camera ID %s, cannot get device version from HAL.", __FUNCTION__,
                     cameraId.string());
-            return BAD_VALUE;
+            return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                    "Bad camera ID \"%s\" passed to camera open", cameraId.string());
         }
 
         int facing = -1;
         int deviceVersion = getDeviceVersion(id, /*out*/&facing);
         sp<BasicClient> tmp = nullptr;
-        if((ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
+        if(!(ret = makeClient(this, cameraCb, clientPackageName, id, facing, clientPid,
                 clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
-                /*out*/&tmp)) != NO_ERROR) {
+                /*out*/&tmp)).isOk()) {
             return ret;
         }
         client = static_cast<CLIENT*>(tmp.get());
@@ -875,9 +915,32 @@
         LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
                 __FUNCTION__);
 
-        if ((ret = client->initialize(mModule)) != OK) {
+        if ((err = client->initialize(mModule)) != OK) {
             ALOGE("%s: Could not initialize client from HAL module.", __FUNCTION__);
-            return ret;
+            // Errors could be from the HAL module open call or from AppOpsManager
+            switch(err) {
+                case BAD_VALUE:
+                    return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                            "Illegal argument to HAL module for camera \"%s\"", cameraId.string());
+                case -EBUSY:
+                    return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+                            "Camera \"%s\" is already open", cameraId.string());
+                case -EUSERS:
+                    return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+                            "Too many cameras already open, cannot open camera \"%s\"",
+                            cameraId.string());
+                case PERMISSION_DENIED:
+                    return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+                            "No permission to open camera \"%s\"", cameraId.string());
+                case -EACCES:
+                    return STATUS_ERROR_FMT(ERROR_DISABLED,
+                            "Camera \"%s\" disabled by policy", cameraId.string());
+                case -ENODEV:
+                default:
+                    return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                            "Failed to initialize camera \"%s\": %s (%d)", cameraId.string(),
+                            strerror(-err), err);
+            }
         }
 
         // Update shim parameters for legacy clients
@@ -910,9 +973,12 @@
     // Important: release the mutex here so the client can call back into the service from its
     // destructor (can be at the end of the call)
     device = client;
-    return NO_ERROR;
+    return ret;
 }
 
+#undef STATUS_ERROR_FMT
+#undef STATUS_ERROR
+
 } // namespace android
 
 #endif
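
The STATUS_ERROR/STATUS_ERROR_FMT macros above capture the error-reporting convention this change introduces: connectHelper and its helpers now return a binder::Status that pairs a service-specific error code with a message tagged with the originating function and line, instead of a bare status_t. Below is a minimal standalone sketch of the same pattern; the Status type is a simplified stand-in for the real android::binder::Status API, and the ERROR_CAMERA_IN_USE value is hypothetical:

// Standalone sketch of the STATUS_ERROR_FMT pattern (illustration only).
#include <cstdarg>
#include <cstdio>
#include <string>

struct Status {
    int code;             // 0 means OK, otherwise a service-specific error
    std::string message;
    static Status ok() { return {0, ""}; }
    static Status fromServiceSpecificError(int code, std::string msg) {
        return {code, std::move(msg)};
    }
    bool isOk() const { return code == 0; }
};

static std::string formatMessage(const char* fmt, ...) {
    char buf[256];
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);
    return std::string(buf);
}

// Tag every error with the function and line that produced it.
#define STATUS_ERROR_FMT(errorCode, fmt, ...) \
    Status::fromServiceSpecificError((errorCode), \
            formatMessage("%s:%d: " fmt, __FUNCTION__, __LINE__, __VA_ARGS__))

enum { ERROR_CAMERA_IN_USE = 4 };  // hypothetical value, for illustration

Status openCamera(const char* id, bool busy) {
    if (busy) {
        return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
                "Camera \"%s\" is already open", id);
    }
    return Status::ok();
}

int main() {
    Status s = openCamera("0", /*busy=*/true);
    if (!s.isOk()) printf("error %d: %s\n", s.code, s.message.c_str());
    return 0;
}
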
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index fbd4034..c8e64fe 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -24,6 +24,7 @@
 
 #include <cutils/properties.h>
 #include <gui/Surface.h>
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
 
 #include "api1/Camera2Client.h"
 
@@ -32,7 +33,6 @@
 #include "api1/client2/CaptureSequencer.h"
 #include "api1/client2/CallbackProcessor.h"
 #include "api1/client2/ZslProcessor.h"
-#include "api1/client2/ZslProcessor3.h"
 
 #define ALOG1(...) ALOGD_IF(gLogLevel >= 1, __VA_ARGS__);
 #define ALOG2(...) ALOGD_IF(gLogLevel >= 2, __VA_ARGS__);
@@ -47,7 +47,7 @@
 // Interface used by CameraService
 
 Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
-        const sp<ICameraClient>& cameraClient,
+        const sp<hardware::ICameraClient>& cameraClient,
         const String16& clientPackageName,
         int cameraId,
         int cameraFacing,
@@ -94,7 +94,6 @@
     mStreamingProcessor = new StreamingProcessor(this);
     threadName = String8::format("C2-%d-StreamProc",
             mCameraId);
-    mStreamingProcessor->run(threadName.string());
 
     mFrameProcessor = new FrameProcessor(mDevice, this);
     threadName = String8::format("C2-%d-FrameProc",
@@ -111,30 +110,11 @@
             mCameraId);
     mJpegProcessor->run(threadName.string());
 
-    switch (mDeviceVersion) {
-        case CAMERA_DEVICE_API_VERSION_2_0: {
-            sp<ZslProcessor> zslProc =
-                    new ZslProcessor(this, mCaptureSequencer);
-            mZslProcessor = zslProc;
-            mZslProcessorThread = zslProc;
-            break;
-        }
-        case CAMERA_DEVICE_API_VERSION_3_0:
-        case CAMERA_DEVICE_API_VERSION_3_1:
-        case CAMERA_DEVICE_API_VERSION_3_2:
-        case CAMERA_DEVICE_API_VERSION_3_3: {
-            sp<ZslProcessor3> zslProc =
-                    new ZslProcessor3(this, mCaptureSequencer);
-            mZslProcessor = zslProc;
-            mZslProcessorThread = zslProc;
-            break;
-        }
-        default:
-            break;
-    }
+    mZslProcessor = new ZslProcessor(this, mCaptureSequencer);
+
     threadName = String8::format("C2-%d-ZslProc",
             mCameraId);
-    mZslProcessorThread->run(threadName.string());
+    mZslProcessor->run(threadName.string());
 
     mCallbackProcessor = new CallbackProcessor(this);
     threadName = String8::format("C2-%d-CallbkProc",
@@ -388,15 +368,16 @@
 
 // ICamera interface
 
-void Camera2Client::disconnect() {
+binder::Status Camera2Client::disconnect() {
     ATRACE_CALL();
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    // Allow both client and the media server to disconnect at all times
+    binder::Status res = binder::Status::ok();
+    // Allow both client and the cameraserver to disconnect at all times
     int callingPid = getCallingPid();
-    if (callingPid != mClientPid && callingPid != mServicePid) return;
+    if (callingPid != mClientPid && callingPid != mServicePid) return res;
 
-    if (mDevice == 0) return;
+    if (mDevice == 0) return res;
 
     ALOGV("Camera %d: Shutting down", mCameraId);
 
@@ -410,15 +391,14 @@
 
     {
         SharedParameters::Lock l(mParameters);
-        if (l.mParameters.state == Parameters::DISCONNECTED) return;
+        if (l.mParameters.state == Parameters::DISCONNECTED) return res;
         l.mParameters.state = Parameters::DISCONNECTED;
     }
 
-    mStreamingProcessor->requestExit();
     mFrameProcessor->requestExit();
     mCaptureSequencer->requestExit();
     mJpegProcessor->requestExit();
-    mZslProcessorThread->requestExit();
+    mZslProcessor->requestExit();
     mCallbackProcessor->requestExit();
 
     ALOGV("Camera %d: Waiting for threads", mCameraId);
@@ -428,11 +408,10 @@
         // complete callbacks that re-enter Camera2Client
         mBinderSerializationLock.unlock();
 
-        mStreamingProcessor->join();
         mFrameProcessor->join();
         mCaptureSequencer->join();
         mJpegProcessor->join();
-        mZslProcessorThread->join();
+        mZslProcessor->join();
         mCallbackProcessor->join();
 
         mBinderSerializationLock.lock();
@@ -446,9 +425,6 @@
     mCallbackProcessor->deleteStream();
     mZslProcessor->deleteStream();
 
-    // Remove all ZSL stream state before disconnect; needed to work around b/15408128.
-    mZslProcessor->disconnect();
-
     ALOGV("Camera %d: Disconnecting device", mCameraId);
 
     mDevice->disconnect();
@@ -456,9 +432,11 @@
     mDevice.clear();
 
     CameraService::Client::disconnect();
+
+    return res;
 }
 
-status_t Camera2Client::connect(const sp<ICameraClient>& client) {
+status_t Camera2Client::connect(const sp<hardware::ICameraClient>& client) {
     ATRACE_CALL();
     ALOGV("%s: E", __FUNCTION__);
     Mutex::Autolock icl(mBinderSerializationLock);
@@ -765,8 +743,8 @@
 
     // We could wait to create the JPEG output stream until first actual use
     // (first takePicture call). However, this would substantially increase the
-    // first capture latency on HAL3 devices, and potentially on some HAL2
-    // devices. So create it unconditionally at preview start. As a drawback,
+    // first capture latency on HAL3 devices.
+    // So create it unconditionally at preview start. As a drawback,
     // this increases gralloc memory consumption for applications that don't
     // ever take a picture. Do not enter this mode when jpeg stream will slow
     // down preview.
@@ -971,7 +949,7 @@
     return l.mParameters.state == Parameters::PREVIEW;
 }
 
-status_t Camera2Client::storeMetaDataInBuffers(bool enabled) {
+status_t Camera2Client::setVideoBufferMode(int32_t videoBufferMode) {
     ATRACE_CALL();
     Mutex::Autolock icl(mBinderSerializationLock);
     status_t res;
@@ -990,7 +968,12 @@
             break;
     }
 
-    l.mParameters.storeMetadataInBuffers = enabled;
+    if (videoBufferMode != VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
+        ALOGE("%s: %d: Only video buffer queue is supported", __FUNCTION__, __LINE__);
+        return BAD_VALUE;
+    }
+
+    l.mParameters.videoBufferMode = videoBufferMode;
 
     return OK;
 }
@@ -1036,10 +1019,14 @@
             return INVALID_OPERATION;
     };
 
-    if (!params.storeMetadataInBuffers) {
-        ALOGE("%s: Camera %d: Recording only supported in metadata mode, but "
-                "non-metadata recording mode requested!", __FUNCTION__,
-                mCameraId);
+    if (params.videoBufferMode != VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
+        ALOGE("%s: Camera %d: Recording only supported buffer queue mode, but "
+                "mode %d is requested!", __FUNCTION__, mCameraId, params.videoBufferMode);
+        return INVALID_OPERATION;
+    }
+
+    if (!mStreamingProcessor->haveValidRecordingWindow()) {
+        ALOGE("%s: No valid recording window", __FUNCTION__);
         return INVALID_OPERATION;
     }
 
@@ -1073,35 +1060,33 @@
         }
     }
 
-    // On current HALs, clean up ZSL before transitioning into recording
-    if (mDeviceVersion != CAMERA_DEVICE_API_VERSION_2_0) {
-        if (mZslProcessor->getStreamId() != NO_STREAM) {
-            ALOGV("%s: Camera %d: Clearing out zsl stream before "
-                    "creating recording stream", __FUNCTION__, mCameraId);
-            res = mStreamingProcessor->stopStream();
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
-                        __FUNCTION__, mCameraId);
-                return res;
-            }
-            res = mDevice->waitUntilDrained();
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
-                        __FUNCTION__, mCameraId, strerror(-res), res);
-            }
-            res = mZslProcessor->clearZslQueue();
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Can't clear zsl queue",
-                        __FUNCTION__, mCameraId);
-                return res;
-            }
-            res = mZslProcessor->deleteStream();
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to delete zsl stream before "
-                        "record: %s (%d)", __FUNCTION__, mCameraId,
-                        strerror(-res), res);
-                return res;
-            }
+    // Clean up ZSL before transitioning into recording
+    if (mZslProcessor->getStreamId() != NO_STREAM) {
+        ALOGV("%s: Camera %d: Clearing out zsl stream before "
+                "creating recording stream", __FUNCTION__, mCameraId);
+        res = mStreamingProcessor->stopStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't stop streaming to delete callback stream",
+                    __FUNCTION__, mCameraId);
+            return res;
+        }
+        res = mDevice->waitUntilDrained();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+        }
+        res = mZslProcessor->clearZslQueue();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't clear zsl queue",
+                    __FUNCTION__, mCameraId);
+            return res;
+        }
+        res = mZslProcessor->deleteStream();
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to delete zsl stream before "
+                    "record: %s (%d)", __FUNCTION__, mCameraId,
+                    strerror(-res), res);
+            return res;
         }
     }
 
@@ -1109,56 +1094,43 @@
     // and we can't fail record start without stagefright asserting.
     params.previewCallbackFlags = 0;
 
-    if (mDeviceVersion != CAMERA_DEVICE_API_VERSION_2_0) {
-        // For newer devices, may need to reconfigure video snapshot JPEG sizes
-        // during recording startup, so need a more complex sequence here to
-        // ensure an early stream reconfiguration doesn't happen
-        bool recordingStreamNeedsUpdate;
-        res = mStreamingProcessor->recordingStreamNeedsUpdate(params, &recordingStreamNeedsUpdate);
+    // May need to reconfigure video snapshot JPEG sizes
+    // during recording startup, so need a more complex sequence here to
+    // ensure an early stream reconfiguration doesn't happen
+    bool recordingStreamNeedsUpdate;
+    res = mStreamingProcessor->recordingStreamNeedsUpdate(params, &recordingStreamNeedsUpdate);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Can't query recording stream",
+                __FUNCTION__, mCameraId);
+        return res;
+    }
+
+    if (recordingStreamNeedsUpdate) {
+        // Need to stop stream here so updateProcessorStream won't trigger configureStream
+        // Right now camera device cannot handle configureStream failure gracefully
+        // when device is streaming
+        res = mStreamingProcessor->stopStream();
         if (res != OK) {
-            ALOGE("%s: Camera %d: Can't query recording stream",
-                    __FUNCTION__, mCameraId);
+            ALOGE("%s: Camera %d: Can't stop streaming to update record "
+                    "stream", __FUNCTION__, mCameraId);
             return res;
         }
-
-        if (recordingStreamNeedsUpdate) {
-            // Need to stop stream here so updateProcessorStream won't trigger configureStream
-            // Right now camera device cannot handle configureStream failure gracefully
-            // when device is streaming
-            res = mStreamingProcessor->stopStream();
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Can't stop streaming to update record "
-                        "stream", __FUNCTION__, mCameraId);
-                return res;
-            }
-            res = mDevice->waitUntilDrained();
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Waiting to stop streaming failed: "
-                        "%s (%d)", __FUNCTION__, mCameraId,
-                        strerror(-res), res);
-            }
-
-            res = updateProcessorStream<
-                    StreamingProcessor,
-                    &StreamingProcessor::updateRecordingStream>(
-                        mStreamingProcessor,
-                        params);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to update recording stream: "
-                        "%s (%d)", __FUNCTION__, mCameraId,
-                        strerror(-res), res);
-                return res;
-            }
-        }
-    } else {
-        // Maintain call sequencing for HALv2 devices.
-        res = updateProcessorStream<
-                StreamingProcessor,
-                &StreamingProcessor::updateRecordingStream>(mStreamingProcessor,
-                    params);
+        res = mDevice->waitUntilDrained();
         if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to update recording stream: %s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
+            ALOGE("%s: Camera %d: Waiting to stop streaming failed: "
+                    "%s (%d)", __FUNCTION__, mCameraId,
+                    strerror(-res), res);
+        }
+
+        res = updateProcessorStream<
+            StreamingProcessor,
+            &StreamingProcessor::updateRecordingStream>(
+                                                        mStreamingProcessor,
+                                                        params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to update recording stream: "
+                    "%s (%d)", __FUNCTION__, mCameraId,
+                    strerror(-res), res);
             return res;
         }
     }
@@ -1218,28 +1190,28 @@
 
     mCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
 
-    // Remove recording stream to prevent it from slowing down takePicture later
-    if (!l.mParameters.recordingHint && l.mParameters.isJpegSizeOverridden()) {
-        res = stopStream();
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Can't stop streaming: %s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-        }
-        res = mDevice->waitUntilDrained();
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-        }
-        // Clean up recording stream
-        res = mStreamingProcessor->deleteRecordingStream();
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to delete recording stream before "
-                    "stop preview: %s (%d)",
-                    __FUNCTION__, mCameraId, strerror(-res), res);
-        }
-        l.mParameters.recoverOverriddenJpegSize();
+    // Remove recording stream because the video target may be abandoned soon.
+    res = stopStream();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Can't stop streaming: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
     }
 
+    res = mDevice->waitUntilDrained();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+    }
+    // Clean up recording stream
+    res = mStreamingProcessor->deleteRecordingStream();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to delete recording stream before "
+                "stop preview: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+    }
+    l.mParameters.recoverOverriddenJpegSize();
+
+    // Restart preview
     res = startPreviewL(l.mParameters, true);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to return to preview",
@@ -1265,11 +1237,15 @@
 }
 
 void Camera2Client::releaseRecordingFrame(const sp<IMemory>& mem) {
+    (void)mem;
     ATRACE_CALL();
-    Mutex::Autolock icl(mBinderSerializationLock);
-    if ( checkPid(__FUNCTION__) != OK) return;
+    ALOGW("%s: Not supported in buffer queue mode.", __FUNCTION__);
+}
 
-    mStreamingProcessor->releaseRecordingFrame(mem);
+void Camera2Client::releaseRecordingFrameHandle(native_handle_t *handle) {
+    (void)handle;
+    ATRACE_CALL();
+    ALOGW("%s: Not supported in buffer queue mode.", __FUNCTION__);
 }
 
 status_t Camera2Client::autoFocus() {
@@ -1571,10 +1547,10 @@
         case CAMERA_CMD_PING:
             return commandPingL();
         case CAMERA_CMD_SET_VIDEO_BUFFER_COUNT:
-            return commandSetVideoBufferCountL(arg1);
         case CAMERA_CMD_SET_VIDEO_FORMAT:
-            return commandSetVideoFormatL(arg1,
-                    static_cast<android_dataspace>(arg2));
+            ALOGE("%s: command %d (arguments %d, %d) is not supported.",
+                    __FUNCTION__, cmd, arg1, arg2);
+            return BAD_VALUE;
         default:
             ALOGE("%s: Unknown command %d (arguments %d, %d)",
                     __FUNCTION__, cmd, arg1, arg2);
@@ -1716,43 +1692,22 @@
     }
 }
 
-status_t Camera2Client::commandSetVideoBufferCountL(size_t count) {
-    if (recordingEnabledL()) {
-        ALOGE("%s: Camera %d: Error setting video buffer count after "
-                "recording was started", __FUNCTION__, mCameraId);
-        return INVALID_OPERATION;
-    }
-
-    return mStreamingProcessor->setRecordingBufferCount(count);
-}
-
-status_t Camera2Client::commandSetVideoFormatL(int format,
-        android_dataspace dataspace) {
-    if (recordingEnabledL()) {
-        ALOGE("%s: Camera %d: Error setting video format after "
-                "recording was started", __FUNCTION__, mCameraId);
-        return INVALID_OPERATION;
-    }
-
-    return mStreamingProcessor->setRecordingFormat(format, dataspace);
-}
-
-void Camera2Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+void Camera2Client::notifyError(int32_t errorCode,
         const CaptureResultExtras& resultExtras) {
     int32_t err = CAMERA_ERROR_UNKNOWN;
     switch(errorCode) {
-        case ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED:
             err = CAMERA_ERROR_RELEASED;
             break;
-        case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
             err = CAMERA_ERROR_UNKNOWN;
             break;
-        case ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE:
             err = CAMERA_ERROR_SERVER_DIED;
             break;
-        case ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
-        case ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
-        case ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
             ALOGW("%s: Received recoverable error %d from HAL - ignoring, requestId %" PRId32,
                     __FUNCTION__, errorCode, resultExtras.requestId);
             return;
@@ -2160,6 +2115,84 @@
     return res;
 }
 
+status_t Camera2Client::setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer) {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    sp<IBinder> binder = IInterface::asBinder(bufferProducer);
+    if (binder == mVideoSurface) {
+        ALOGV("%s: Camera %d: New video window is same as old video window",
+                __FUNCTION__, mCameraId);
+        return NO_ERROR;
+    }
+
+    sp<Surface> window;
+    int format;
+    android_dataspace dataSpace;
+
+    if (bufferProducer != nullptr) {
+        // Using controlledByApp flag to ensure that the buffer queue remains in
+        // async mode for the old camera API, where many applications depend
+        // on that behavior.
+        window = new Surface(bufferProducer, /*controlledByApp*/ true);
+
+        ANativeWindow *anw = window.get();
+
+        if ((res = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+            ALOGE("%s: Failed to query Surface format", __FUNCTION__);
+            return res;
+        }
+
+        if ((res = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+                                reinterpret_cast<int*>(&dataSpace))) != OK) {
+            ALOGE("%s: Failed to query Surface dataSpace", __FUNCTION__);
+            return res;
+        }
+    }
+
+    Parameters::State state;
+    {
+        SharedParameters::Lock l(mParameters);
+        state = l.mParameters.state;
+    }
+
+    switch (state) {
+        case Parameters::STOPPED:
+        case Parameters::WAITING_FOR_PREVIEW_WINDOW:
+        case Parameters::PREVIEW:
+            // OK
+            break;
+        case Parameters::DISCONNECTED:
+        case Parameters::RECORD:
+        case Parameters::STILL_CAPTURE:
+        case Parameters::VIDEO_SNAPSHOT:
+        default:
+            ALOGE("%s: Camera %d: Cannot set video target while in state %s",
+                    __FUNCTION__, mCameraId,
+                    Parameters::getStateName(state));
+            return INVALID_OPERATION;
+    }
+
+    mVideoSurface = binder;
+    res = mStreamingProcessor->setRecordingWindow(window);
+    if (res != OK) {
+        ALOGE("%s: Unable to set new recording window: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    {
+        SharedParameters::Lock l(mParameters);
+        l.mParameters.videoFormat = format;
+        l.mParameters.videoDataSpace = dataSpace;
+    }
+
+    return OK;
+}
+
 const char* Camera2Client::kAutofocusLabel = "autofocus";
 const char* Camera2Client::kTakepictureLabel = "take_picture";
 
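The new setVideoTarget path above only accepts a recording surface while the pipeline is quiescent (stopped, waiting for a preview window, or previewing). A small self-contained sketch of that state gate, with an illustrative enum standing in for the real Parameters::State:

#include <cstdio>

// Illustrative subset of Parameters::State, for a standalone sketch only.
enum class State {
    DISCONNECTED, STOPPED, WAITING_FOR_PREVIEW_WINDOW,
    PREVIEW, RECORD, STILL_CAPTURE, VIDEO_SNAPSHOT
};

// Swapping the recording surface is only allowed while nothing is being
// recorded or captured, mirroring the switch in setVideoTarget().
bool canSetVideoTarget(State s) {
    switch (s) {
        case State::STOPPED:
        case State::WAITING_FOR_PREVIEW_WINDOW:
        case State::PREVIEW:
            return true;
        default:
            return false;
    }
}

int main() {
    printf("PREVIEW: %d\n", canSetVideoTarget(State::PREVIEW) ? 1 : 0);  // allowed
    printf("RECORD:  %d\n", canSetVideoTarget(State::RECORD) ? 1 : 0);   // rejected
    return 0;
}
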
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 7e7a284..3cb9e4f 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -24,7 +24,7 @@
 #include "api1/client2/FrameProcessor.h"
 //#include "api1/client2/StreamingProcessor.h"
 //#include "api1/client2/JpegProcessor.h"
-//#include "api1/client2/ZslProcessorInterface.h"
+//#include "api1/client2/ZslProcessor.h"
 //#include "api1/client2/CaptureSequencer.h"
 //#include "api1/client2/CallbackProcessor.h"
 
@@ -34,7 +34,7 @@
 
 class StreamingProcessor;
 class JpegProcessor;
-class ZslProcessorInterface;
+class ZslProcessor;
 class CaptureSequencer;
 class CallbackProcessor;
 
@@ -43,7 +43,7 @@
 class IMemory;
 /**
  * Interface between android.hardware.Camera API and Camera HAL device for versions
- * CAMERA_DEVICE_API_VERSION_2_0 and 3_0.
+ * CAMERA_DEVICE_API_VERSION_3_0 and above.
  */
 class Camera2Client :
         public Camera2ClientBase<CameraService::Client>
@@ -53,8 +53,8 @@
      * ICamera interface (see ICamera for details)
      */
 
-    virtual void            disconnect();
-    virtual status_t        connect(const sp<ICameraClient>& client);
+    virtual binder::Status  disconnect();
+    virtual status_t        connect(const sp<hardware::ICameraClient>& client);
     virtual status_t        lock();
     virtual status_t        unlock();
     virtual status_t        setPreviewTarget(
@@ -66,26 +66,28 @@
     virtual status_t        startPreview();
     virtual void            stopPreview();
     virtual bool            previewEnabled();
-    virtual status_t        storeMetaDataInBuffers(bool enabled);
+    virtual status_t        setVideoBufferMode(int32_t videoBufferMode);
     virtual status_t        startRecording();
     virtual void            stopRecording();
     virtual bool            recordingEnabled();
     virtual void            releaseRecordingFrame(const sp<IMemory>& mem);
+    virtual void            releaseRecordingFrameHandle(native_handle_t *handle);
     virtual status_t        autoFocus();
     virtual status_t        cancelAutoFocus();
     virtual status_t        takePicture(int msgType);
     virtual status_t        setParameters(const String8& params);
     virtual String8         getParameters() const;
     virtual status_t        sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
-    virtual void            notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+    virtual void            notifyError(int32_t errorCode,
                                         const CaptureResultExtras& resultExtras);
+    virtual status_t        setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
 
     /**
      * Interface used by CameraService
      */
 
     Camera2Client(const sp<CameraService>& cameraService,
-            const sp<ICameraClient>& cameraClient,
+            const sp<hardware::ICameraClient>& cameraClient,
             const String16& clientPackageName,
             int cameraId,
             int cameraFacing,
@@ -196,6 +198,7 @@
     /* Preview/Recording related members */
 
     sp<IBinder> mPreviewSurface;
+    sp<IBinder> mVideoSurface;
     sp<camera2::StreamingProcessor> mStreamingProcessor;
 
     /** Preview callback related members */
@@ -206,12 +209,7 @@
 
     sp<camera2::CaptureSequencer> mCaptureSequencer;
     sp<camera2::JpegProcessor> mJpegProcessor;
-    sp<camera2::ZslProcessorInterface> mZslProcessor;
-    sp<Thread> mZslProcessorThread;
-
-    /** Notification-related members */
-
-    bool mAfInMotion;
+    sp<camera2::ZslProcessor> mZslProcessor;
 
     /** Utility members */
     bool mLegacyMode;
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 6020e35..266fb03 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -19,6 +19,7 @@
 
 #include <cutils/properties.h>
 #include <gui/Surface.h>
+#include <media/hardware/HardwareAPI.h>
 
 #include "api1/CameraClient.h"
 #include "device1/CameraHardwareInterface.h"
@@ -34,7 +35,7 @@
 }
 
 CameraClient::CameraClient(const sp<CameraService>& cameraService,
-        const sp<ICameraClient>& cameraClient,
+        const sp<hardware::ICameraClient>& cameraClient,
         const String16& clientPackageName,
         int cameraId, int cameraFacing,
         int clientPid, int clientUid,
@@ -147,12 +148,12 @@
 }
 
 status_t CameraClient::checkPidAndHardware() const {
-    status_t result = checkPid();
-    if (result != NO_ERROR) return result;
     if (mHardware == 0) {
         ALOGE("attempt to use a camera after disconnect() (pid %d)", getCallingPid());
         return INVALID_OPERATION;
     }
+    status_t result = checkPid();
+    if (result != NO_ERROR) return result;
     return NO_ERROR;
 }
 
@@ -193,7 +194,7 @@
 }
 
 // connect a new client to the camera
-status_t CameraClient::connect(const sp<ICameraClient>& client) {
+status_t CameraClient::connect(const sp<hardware::ICameraClient>& client) {
     int callingPid = getCallingPid();
     LOG1("connect E (pid %d)", callingPid);
     Mutex::Autolock lock(mLock);
@@ -229,25 +230,21 @@
     }
 }
 
-void CameraClient::disconnect() {
+binder::Status CameraClient::disconnect() {
     int callingPid = getCallingPid();
     LOG1("disconnect E (pid %d)", callingPid);
     Mutex::Autolock lock(mLock);
 
-    // Allow both client and the media server to disconnect at all times
+    binder::Status res = binder::Status::ok();
+    // Allow both client and the cameraserver to disconnect at all times
     if (callingPid != mClientPid && callingPid != mServicePid) {
         ALOGW("different client - don't disconnect");
-        return;
-    }
-
-    if (mClientPid <= 0) {
-        LOG1("camera is unlocked (mClientPid = %d), don't tear down hardware", mClientPid);
-        return;
+        return res;
     }
 
     // Make sure disconnect() is done once and once only, whether it is called
     // from the user directly, or called by the destructor.
-    if (mHardware == 0) return;
+    if (mHardware == 0) return res;
 
     LOG1("hardware teardown");
     // Before destroying mHardware, we must make sure it's in the
@@ -273,6 +270,8 @@
     CameraService::Client::disconnect();
 
     LOG1("disconnect X (pid %d)", callingPid);
+
+    return res;
 }
 
 // ----------------------------------------------------------------------------
@@ -300,9 +299,8 @@
     // If preview has been already started, register preview buffers now.
     if (mHardware->previewEnabled()) {
         if (window != 0) {
-            native_window_set_scaling_mode(window.get(),
-                    NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-            native_window_set_buffers_transform(window.get(), mOrientation);
+            mHardware->setPreviewScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+            mHardware->setPreviewTransform(mOrientation);
             result = mHardware->setPreviewWindow(window);
         }
     }
@@ -409,10 +407,9 @@
     }
 
     if (mPreviewWindow != 0) {
-        native_window_set_scaling_mode(mPreviewWindow.get(),
-                NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-        native_window_set_buffers_transform(mPreviewWindow.get(),
-                mOrientation);
+        mHardware->setPreviewScalingMode(
+            NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+        mHardware->setPreviewTransform(mOrientation);
     }
     mHardware->setPreviewWindow(mPreviewWindow);
     result = mHardware->startPreview();
@@ -483,17 +480,66 @@
 void CameraClient::releaseRecordingFrame(const sp<IMemory>& mem) {
     Mutex::Autolock lock(mLock);
     if (checkPidAndHardware() != NO_ERROR) return;
+    if (mem == nullptr) {
+        android_errorWriteWithInfoLog(CameraService::SN_EVENT_LOG_ID, "26164272",
+                IPCThreadState::self()->getCallingUid(), nullptr, 0);
+        return;
+    }
+
     mHardware->releaseRecordingFrame(mem);
 }
 
-status_t CameraClient::storeMetaDataInBuffers(bool enabled)
-{
-    LOG1("storeMetaDataInBuffers: %s", enabled? "true": "false");
+void CameraClient::releaseRecordingFrameHandle(native_handle_t *handle) {
+    if (handle == nullptr) return;
+
+    sp<IMemory> dataPtr;
+    {
+        Mutex::Autolock l(mAvailableCallbackBuffersLock);
+        if (!mAvailableCallbackBuffers.empty()) {
+            dataPtr = mAvailableCallbackBuffers.back();
+            mAvailableCallbackBuffers.pop_back();
+        }
+    }
+
+    if (dataPtr == nullptr) {
+        ALOGE("%s: %d: No callback buffer available. Dropping a native handle.", __FUNCTION__,
+                __LINE__);
+        native_handle_close(handle);
+        native_handle_delete(handle);
+        return;
+    } else if (dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
+        ALOGE("%s: %d: Callback buffer size doesn't match VideoNativeHandleMetadata", __FUNCTION__,
+                __LINE__);
+        native_handle_close(handle);
+        native_handle_delete(handle);
+        return;
+    }
+
+    VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->pointer());
+    metadata->eType = kMetadataBufferTypeNativeHandleSource;
+    metadata->pHandle = handle;
+
+    mHardware->releaseRecordingFrame(dataPtr);
+}
+
+status_t CameraClient::setVideoBufferMode(int32_t videoBufferMode) {
+    LOG1("setVideoBufferMode: %d", videoBufferMode);
+    bool enableMetadataInBuffers = false;
+
+    if (videoBufferMode == VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA) {
+        enableMetadataInBuffers = true;
+    } else if (videoBufferMode != VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV) {
+        ALOGE("%s: %d: videoBufferMode %d is not supported.", __FUNCTION__, __LINE__,
+                videoBufferMode);
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock lock(mLock);
     if (checkPidAndHardware() != NO_ERROR) {
         return UNKNOWN_ERROR;
     }
-    return mHardware->storeMetaDataInBuffers(enabled);
+
+    return mHardware->storeMetaDataInBuffers(enableMetadataInBuffers);
 }
 
 bool CameraClient::previewEnabled() {
@@ -636,8 +682,7 @@
         if (mOrientation != orientation) {
             mOrientation = orientation;
             if (mPreviewWindow != 0) {
-                native_window_set_buffers_transform(mPreviewWindow.get(),
-                        mOrientation);
+                mHardware->setPreviewTransform(mOrientation);
             }
         }
         return OK;
@@ -795,7 +840,7 @@
         mCameraService->playSound(CameraService::SOUND_SHUTTER);
     }
 
-    sp<ICameraClient> c = mRemoteCallback;
+    sp<hardware::ICameraClient> c = mRemoteCallback;
     if (c != 0) {
         mLock.unlock();
         c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0);
@@ -832,7 +877,7 @@
     }
 
     // hold a strong pointer to the client
-    sp<ICameraClient> c = mRemoteCallback;
+    sp<hardware::ICameraClient> c = mRemoteCallback;
 
     // clear callback flags if no client or one-shot mode
     if (c == 0 || (mPreviewCallbackFlag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK)) {
@@ -862,7 +907,7 @@
 void CameraClient::handlePostview(const sp<IMemory>& mem) {
     disableMsgType(CAMERA_MSG_POSTVIEW_FRAME);
 
-    sp<ICameraClient> c = mRemoteCallback;
+    sp<hardware::ICameraClient> c = mRemoteCallback;
     mLock.unlock();
     if (c != 0) {
         c->dataCallback(CAMERA_MSG_POSTVIEW_FRAME, mem, NULL);
@@ -877,7 +922,7 @@
     size_t size;
     sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
 
-    sp<ICameraClient> c = mRemoteCallback;
+    sp<hardware::ICameraClient> c = mRemoteCallback;
     mLock.unlock();
     if (c != 0) {
         c->dataCallback(CAMERA_MSG_RAW_IMAGE, mem, NULL);
@@ -888,7 +933,7 @@
 void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
     disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);
 
-    sp<ICameraClient> c = mRemoteCallback;
+    sp<hardware::ICameraClient> c = mRemoteCallback;
     mLock.unlock();
     if (c != 0) {
         c->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mem, NULL);
@@ -898,7 +943,7 @@
 
 void CameraClient::handleGenericNotify(int32_t msgType,
     int32_t ext1, int32_t ext2) {
-    sp<ICameraClient> c = mRemoteCallback;
+    sp<hardware::ICameraClient> c = mRemoteCallback;
     mLock.unlock();
     if (c != 0) {
         c->notifyCallback(msgType, ext1, ext2);
@@ -907,7 +952,7 @@
 
 void CameraClient::handleGenericData(int32_t msgType,
     const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata) {
-    sp<ICameraClient> c = mRemoteCallback;
+    sp<hardware::ICameraClient> c = mRemoteCallback;
     mLock.unlock();
     if (c != 0) {
         c->dataCallback(msgType, dataPtr, metadata);
@@ -916,15 +961,35 @@
 
 void CameraClient::handleGenericDataTimestamp(nsecs_t timestamp,
     int32_t msgType, const sp<IMemory>& dataPtr) {
-    sp<ICameraClient> c = mRemoteCallback;
+    sp<hardware::ICameraClient> c = mRemoteCallback;
     mLock.unlock();
-    if (c != 0) {
-        c->dataCallbackTimestamp(timestamp, msgType, dataPtr);
+    if (c != 0 && dataPtr != nullptr) {
+        native_handle_t* handle = nullptr;
+
+        // Check if dataPtr contains a VideoNativeHandleMetadata.
+        if (dataPtr->size() == sizeof(VideoNativeHandleMetadata)) {
+            VideoNativeHandleMetadata *metadata =
+                (VideoNativeHandleMetadata*)(dataPtr->pointer());
+            if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
+                handle = metadata->pHandle;
+            }
+        }
+
+        // If dataPtr contains a native handle, send it via recordingFrameHandleCallbackTimestamp.
+        if (handle != nullptr) {
+            {
+                Mutex::Autolock l(mAvailableCallbackBuffersLock);
+                mAvailableCallbackBuffers.push_back(dataPtr);
+            }
+            c->recordingFrameHandleCallbackTimestamp(timestamp, handle);
+        } else {
+            c->dataCallbackTimestamp(timestamp, msgType, dataPtr);
+        }
     }
 }
 
 void CameraClient::copyFrameAndPostCopiedFrame(
-        int32_t msgType, const sp<ICameraClient>& client,
+        int32_t msgType, const sp<hardware::ICameraClient>& client,
         const sp<IMemoryHeap>& heap, size_t offset, size_t size,
         camera_frame_metadata_t *metadata) {
     LOG2("copyFrameAndPostCopiedFrame");
@@ -995,4 +1060,10 @@
     return -1;
 }
 
+status_t CameraClient::setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer) {
+    (void)bufferProducer;
+    ALOGE("%s: %d: CameraClient doesn't support setting a video target.", __FUNCTION__, __LINE__);
+    return INVALID_OPERATION;
+}
+
 }; // namespace android
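
On the HAL1 path above, CameraClient::setVideoBufferMode folds the new mode enum back onto the legacy storeMetaDataInBuffers(bool) toggle and rejects buffer-queue mode. A standalone sketch of that mapping; the mode constants are illustrative stand-ins for the values defined alongside ICamera:

#include <cstdint>
#include <cstdio>

// Illustrative mode constants (assumed values, for the sketch only).
enum : int32_t {
    VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV      = 0,
    VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA = 1,
    VIDEO_BUFFER_MODE_BUFFER_QUEUE           = 2,
};

// HAL1 path: fold the mode enum back onto the old boolean toggle.
// Returns false for modes a HAL1 device cannot honor (e.g. buffer queue).
bool mapVideoBufferMode(int32_t mode, bool* enableMetadataInBuffers) {
    switch (mode) {
        case VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA:
            *enableMetadataInBuffers = true;
            return true;
        case VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV:
            *enableMetadataInBuffers = false;
            return true;
        default:
            return false;
    }
}

int main() {
    bool meta = false;
    if (mapVideoBufferMode(VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA, &meta)) {
        printf("storeMetaDataInBuffers(%s)\n", meta ? "true" : "false");
    }
    return 0;
}
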
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 17999a5..4f46fc4 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -33,8 +33,8 @@
 {
 public:
     // ICamera interface (see ICamera for details)
-    virtual void            disconnect();
-    virtual status_t        connect(const sp<ICameraClient>& client);
+    virtual binder::Status  disconnect();
+    virtual status_t        connect(const sp<hardware::ICameraClient>& client);
     virtual status_t        lock();
     virtual status_t        unlock();
     virtual status_t        setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer);
@@ -44,21 +44,23 @@
     virtual status_t        startPreview();
     virtual void            stopPreview();
     virtual bool            previewEnabled();
-    virtual status_t        storeMetaDataInBuffers(bool enabled);
+    virtual status_t        setVideoBufferMode(int32_t videoBufferMode);
     virtual status_t        startRecording();
     virtual void            stopRecording();
     virtual bool            recordingEnabled();
     virtual void            releaseRecordingFrame(const sp<IMemory>& mem);
+    virtual void            releaseRecordingFrameHandle(native_handle_t *handle);
     virtual status_t        autoFocus();
     virtual status_t        cancelAutoFocus();
     virtual status_t        takePicture(int msgType);
     virtual status_t        setParameters(const String8& params);
     virtual String8         getParameters() const;
     virtual status_t        sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
+    virtual status_t        setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
 
     // Interface used by CameraService
     CameraClient(const sp<CameraService>& cameraService,
-            const sp<ICameraClient>& cameraClient,
+            const sp<hardware::ICameraClient>& cameraClient,
             const String16& clientPackageName,
             int cameraId,
             int cameraFacing,
@@ -115,7 +117,7 @@
 
     void                    copyFrameAndPostCopiedFrame(
         int32_t msgType,
-        const sp<ICameraClient>& client,
+        const sp<hardware::ICameraClient>& client,
         const sp<IMemoryHeap>& heap,
         size_t offset, size_t size,
         camera_frame_metadata_t *metadata);
@@ -147,6 +149,12 @@
     // Debugging information
     CameraParameters                mLatestSetParameters;
 
+    // mAvailableCallbackBuffers stores the sp<IMemory> buffers that the HAL uses to send
+    // VideoNativeHandleMetadata. They are reused to send VideoNativeHandleMetadata back to the
+    // HAL when the camera service receives a native handle from releaseRecordingFrameHandle.
+    Mutex                           mAvailableCallbackBuffersLock;
+    std::vector<sp<IMemory>>        mAvailableCallbackBuffers;
+
     // We need to avoid the deadlock when the incoming command thread and
     // the CameraHardwareInterface callback thread both want to grab mLock.
     // An extra flag is used to tell the callback thread that it should stop
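
The mAvailableCallbackBuffers members declared above implement a small recycling pool: when the HAL delivers a frame as VideoNativeHandleMetadata, the service keeps the backing IMemory, forwards only the native handle to the client, and later rewraps the returned handle in a pooled buffer for mHardware->releaseRecordingFrame(). A simplified, self-contained sketch of that round trip, using stand-in types rather than the real IMemory and native_handle_t:

#include <cstdio>
#include <memory>
#include <mutex>
#include <vector>

// Stand-ins for native_handle_t and the IMemory-backed metadata buffer.
struct NativeHandle { int fd; };
struct Buffer { NativeHandle* handle; };

static std::mutex gPoolLock;                        // ~mAvailableCallbackBuffersLock
static std::vector<std::shared_ptr<Buffer>> gPool;  // ~mAvailableCallbackBuffers

// Delivery path: keep the backing buffer for reuse, hand out only the handle.
NativeHandle* deliverFrame(std::shared_ptr<Buffer> buf) {
    std::lock_guard<std::mutex> l(gPoolLock);
    NativeHandle* h = buf->handle;
    gPool.push_back(std::move(buf));
    return h;  // sent to the client via recordingFrameHandleCallbackTimestamp
}

// Return path: rewrap the handle in a pooled buffer and give it back to the HAL.
std::shared_ptr<Buffer> releaseFrameHandle(NativeHandle* h) {
    std::lock_guard<std::mutex> l(gPoolLock);
    if (gPool.empty()) {
        return nullptr;  // no buffer available: the real code closes the handle here
    }
    std::shared_ptr<Buffer> buf = gPool.back();
    gPool.pop_back();
    buf->handle = h;
    return buf;  // the real code passes this to mHardware->releaseRecordingFrame()
}

int main() {
    NativeHandle nh{42};
    auto buf = std::make_shared<Buffer>(Buffer{&nh});
    NativeHandle* h = deliverFrame(buf);
    std::shared_ptr<Buffer> back = releaseFrameHandle(h);
    printf("recycled: %s\n", back ? "yes" : "no");
    return 0;
}
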
diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.cpp b/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
deleted file mode 100644
index 5502dcb..0000000
--- a/services/camera/libcameraservice/api1/client2/BurstCapture.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "Camera2-BurstCapture"
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-
-#include "BurstCapture.h"
-
-#include "api1/Camera2Client.h"
-#include "api1/client2/JpegCompressor.h"
-
-namespace android {
-namespace camera2 {
-
-BurstCapture::BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer):
-    mCaptureStreamId(NO_STREAM),
-    mClient(client),
-    mSequencer(sequencer)
-{
-}
-
-BurstCapture::~BurstCapture() {
-}
-
-status_t BurstCapture::start(Vector<CameraMetadata> &/*metadatas*/,
-                             int32_t /*firstCaptureId*/) {
-    ALOGE("Not completely implemented");
-    return INVALID_OPERATION;
-}
-
-void BurstCapture::onFrameAvailable(const BufferItem &/*item*/) {
-    ALOGV("%s", __FUNCTION__);
-    Mutex::Autolock l(mInputMutex);
-    if(!mInputChanged) {
-        mInputChanged = true;
-        mInputSignal.signal();
-    }
-}
-
-bool BurstCapture::threadLoop() {
-    status_t res;
-    {
-        Mutex::Autolock l(mInputMutex);
-        while(!mInputChanged) {
-            res = mInputSignal.waitRelative(mInputMutex, kWaitDuration);
-            if(res == TIMED_OUT) return true;
-        }
-        mInputChanged = false;
-    }
-
-    do {
-        sp<Camera2Client> client = mClient.promote();
-        if(client == 0) return false;
-        ALOGV("%s: Calling processFrameAvailable()", __FUNCTION__);
-        res = processFrameAvailable(client);
-    } while(res == OK);
-
-    return true;
-}
-
-CpuConsumer::LockedBuffer* BurstCapture::jpegEncode(
-    CpuConsumer::LockedBuffer *imgBuffer,
-    int /*quality*/)
-{
-    ALOGV("%s", __FUNCTION__);
-
-    CpuConsumer::LockedBuffer *imgEncoded = new CpuConsumer::LockedBuffer;
-    uint8_t *data = new uint8_t[ANDROID_JPEG_MAX_SIZE];
-    imgEncoded->data = data;
-    imgEncoded->width = imgBuffer->width;
-    imgEncoded->height = imgBuffer->height;
-    imgEncoded->stride = imgBuffer->stride;
-
-    Vector<CpuConsumer::LockedBuffer*> buffers;
-    buffers.push_back(imgBuffer);
-    buffers.push_back(imgEncoded);
-
-    sp<JpegCompressor> jpeg = new JpegCompressor();
-    jpeg->start(buffers, 1);
-
-    bool success = jpeg->waitForDone(10 * 1e9);
-    if(success) {
-        return buffers[1];
-    }
-    else {
-        ALOGE("%s: JPEG encode timed out", __FUNCTION__);
-        return NULL;  // TODO: maybe change function return value to status_t
-    }
-}
-
-status_t BurstCapture::processFrameAvailable(sp<Camera2Client> &/*client*/) {
-    ALOGE("Not implemented");
-    return INVALID_OPERATION;
-}
-
-} // namespace camera2
-} // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/BurstCapture.h b/services/camera/libcameraservice/api1/client2/BurstCapture.h
deleted file mode 100644
index c3b7722..0000000
--- a/services/camera/libcameraservice/api1/client2/BurstCapture.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
-#define ANDROID_SERVERS_CAMERA_BURST_CAPTURE_H
-
-#include <camera/CameraMetadata.h>
-#include <binder/MemoryBase.h>
-#include <binder/MemoryHeapBase.h>
-#include <gui/CpuConsumer.h>
-
-#include "device2/Camera2Device.h"
-
-namespace android {
-
-class Camera2Client;
-
-namespace camera2 {
-
-class CaptureSequencer;
-
-class BurstCapture : public virtual Thread,
-                     public virtual CpuConsumer::FrameAvailableListener
-{
-public:
-    BurstCapture(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
-    virtual ~BurstCapture();
-
-    virtual void onFrameAvailable(const BufferItem& item);
-    virtual status_t start(Vector<CameraMetadata> &metadatas, int32_t firstCaptureId);
-
-protected:
-    Mutex mInputMutex;
-    bool mInputChanged;
-    Condition mInputSignal;
-    int mCaptureStreamId;
-    wp<Camera2Client> mClient;
-    wp<CaptureSequencer> mSequencer;
-
-    // Should only be accessed by processing thread
-    enum {
-        NO_STREAM = -1
-    };
-
-    CpuConsumer::LockedBuffer* jpegEncode(
-        CpuConsumer::LockedBuffer *imgBuffer,
-        int quality);
-
-    virtual status_t processFrameAvailable(sp<Camera2Client> &client);
-
-private:
-    virtual bool threadLoop();
-    static const nsecs_t kWaitDuration = 10000000; // 10 ms
-};
-
-} // namespace camera2
-} // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index 5f4fb22..b4b269a 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -155,7 +155,7 @@
                 callbackFormat, params.previewFormat);
         res = device->createStream(mCallbackWindow,
                 params.previewWidth, params.previewHeight, callbackFormat,
-                HAL_DATASPACE_JFIF, CAMERA3_STREAM_ROTATION_0, &mCallbackStreamId);
+                HAL_DATASPACE_V0_JFIF, CAMERA3_STREAM_ROTATION_0, &mCallbackStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
index a290536..a22442f 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
@@ -33,7 +33,7 @@
 
 namespace camera2 {
 
-class Parameters;
+struct Parameters;
 
 /***
  * Still image capture output image processing
@@ -75,7 +75,6 @@
     sp<CpuConsumer>    mCallbackConsumer;
     sp<Surface>        mCallbackWindow;
     sp<Camera2Heap>    mCallbackHeap;
-    int mCallbackHeapId;
     size_t mCallbackHeapHead, mCallbackHeapFree;
 
     virtual bool threadLoop();
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 5f7fd74..e3d6906 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -26,9 +26,8 @@
 
 #include "api1/Camera2Client.h"
 #include "api1/client2/CaptureSequencer.h"
-#include "api1/client2/BurstCapture.h"
 #include "api1/client2/Parameters.h"
-#include "api1/client2/ZslProcessorInterface.h"
+#include "api1/client2/ZslProcessor.h"
 
 namespace android {
 namespace camera2 {
@@ -42,6 +41,7 @@
         mNewAEState(false),
         mNewFrameReceived(false),
         mNewCaptureReceived(false),
+        mNewCaptureErrorCnt(0),
         mShutterNotified(false),
         mHalNotifiedShutter(false),
         mShutterCaptureId(-1),
@@ -59,7 +59,7 @@
     ALOGV("%s: Exit", __FUNCTION__);
 }
 
-void CaptureSequencer::setZslProcessor(wp<ZslProcessorInterface> processor) {
+void CaptureSequencer::setZslProcessor(wp<ZslProcessor> processor) {
     Mutex::Autolock l(mInputMutex);
     mZslProcessor = processor;
 }
@@ -111,6 +111,7 @@
 void CaptureSequencer::notifyShutter(const CaptureResultExtras& resultExtras,
                                      nsecs_t timestamp) {
     ATRACE_CALL();
+    (void) timestamp;
     Mutex::Autolock l(mInputMutex);
     if (!mHalNotifiedShutter && resultExtras.requestId == mShutterCaptureId) {
         mHalNotifiedShutter = true;
@@ -131,7 +132,7 @@
 }
 
 void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp,
-        sp<MemoryBase> captureBuffer) {
+        sp<MemoryBase> captureBuffer, bool captureError) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
     Mutex::Autolock l(mInputMutex);
@@ -139,6 +140,11 @@
     mCaptureBuffer = captureBuffer;
     if (!mNewCaptureReceived) {
         mNewCaptureReceived = true;
+        if (captureError) {
+            mNewCaptureErrorCnt++;
+        } else {
+            mNewCaptureErrorCnt = 0;
+        }
         mNewCaptureSignal.signal();
     }
 }
@@ -174,8 +180,6 @@
     "STANDARD_PRECAPTURE_WAIT",
     "STANDARD_CAPTURE",
     "STANDARD_CAPTURE_WAIT",
-    "BURST_CAPTURE_START",
-    "BURST_CAPTURE_WAIT",
     "DONE",
     "ERROR",
     "UNKNOWN"
@@ -192,8 +196,6 @@
     &CaptureSequencer::manageStandardPrecaptureWait,
     &CaptureSequencer::manageStandardCapture,
     &CaptureSequencer::manageStandardCaptureWait,
-    &CaptureSequencer::manageBurstCaptureStart,
-    &CaptureSequencer::manageBurstCaptureWait,
     &CaptureSequencer::manageDone,
 };
 
@@ -293,7 +295,7 @@
         }
         takePictureCounter = l.mParameters.takePictureCounter;
     }
-    sp<ZslProcessorInterface> processor = mZslProcessor.promote();
+    sp<ZslProcessor> processor = mZslProcessor.promote();
     if (processor != 0) {
         ALOGV("%s: Memory optimization, clearing ZSL queue",
               __FUNCTION__);
@@ -336,10 +338,6 @@
         return DONE;
     }
 
-    if(l.mParameters.lightFx != Parameters::LIGHTFX_NONE &&
-            l.mParameters.state == Parameters::STILL_CAPTURE) {
-        nextState = BURST_CAPTURE_START;
-    }
-    else if (l.mParameters.zslMode &&
+    if (l.mParameters.zslMode &&
             l.mParameters.state == Parameters::STILL_CAPTURE &&
             l.mParameters.flashMode != Parameters::FLASH_MODE_ON) {
@@ -361,7 +359,7 @@
         sp<Camera2Client> &client) {
     ALOGV("%s", __FUNCTION__);
     status_t res;
-    sp<ZslProcessorInterface> processor = mZslProcessor.promote();
+    sp<ZslProcessor> processor = mZslProcessor.promote();
     if (processor == 0) {
         ALOGE("%s: No ZSL queue to use!", __FUNCTION__);
         return DONE;
@@ -631,6 +629,17 @@
             break;
         }
     }
+    if (mNewCaptureReceived) {
+        if (mNewCaptureErrorCnt > kMaxRetryCount) {
+            ALOGW("Exceeding multiple retry limit of %d due to buffer drop", kMaxRetryCount);
+            return DONE;
+        } else if (mNewCaptureErrorCnt > 0) {
+            ALOGW("Capture error happened, retry %d...", mNewCaptureErrorCnt);
+            mNewCaptureReceived = false;
+            return STANDARD_CAPTURE;
+        }
+    }
+
     if (mTimeoutCount <= 0) {
         ALOGW("Timed out waiting for capture to complete");
         return DONE;
@@ -664,76 +673,6 @@
     return STANDARD_CAPTURE_WAIT;
 }
 
-CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureStart(
-        sp<Camera2Client> &client) {
-    ALOGV("%s", __FUNCTION__);
-    status_t res;
-    ATRACE_CALL();
-
-    // check which burst mode is set, create respective burst object
-    {
-        SharedParameters::Lock l(client->getParameters());
-
-        res = updateCaptureRequest(l.mParameters, client);
-        if(res != OK) {
-            return DONE;
-        }
-
-        //
-        // check for burst mode type in mParameters here
-        //
-        mBurstCapture = new BurstCapture(client, this);
-    }
-
-    res = mCaptureRequest.update(ANDROID_REQUEST_ID, &mCaptureId, 1);
-    if (res == OK) {
-        res = mCaptureRequest.sort();
-    }
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)",
-                __FUNCTION__, client->getCameraId(), strerror(-res), res);
-        return DONE;
-    }
-
-    CameraMetadata captureCopy = mCaptureRequest;
-    if (captureCopy.entryCount() == 0) {
-        ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
-                __FUNCTION__, client->getCameraId());
-        return DONE;
-    }
-
-    Vector<CameraMetadata> requests;
-    requests.push(mCaptureRequest);
-    res = mBurstCapture->start(requests, mCaptureId);
-    mTimeoutCount = kMaxTimeoutsForCaptureEnd * 10;
-    return BURST_CAPTURE_WAIT;
-}
-
-CaptureSequencer::CaptureState CaptureSequencer::manageBurstCaptureWait(
-        sp<Camera2Client> &/*client*/) {
-    status_t res;
-    ATRACE_CALL();
-    while (!mNewCaptureReceived) {
-        res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
-        if (res == TIMED_OUT) {
-            mTimeoutCount--;
-            break;
-        }
-    }
-
-    if (mTimeoutCount <= 0) {
-        ALOGW("Timed out waiting for burst capture to complete");
-        return DONE;
-    }
-    if (mNewCaptureReceived) {
-        mNewCaptureReceived = false;
-        // TODO: update mCaptureId to last burst's capture ID + 1?
-        return DONE;
-    }
-
-    return BURST_CAPTURE_WAIT;
-}
-
 status_t CaptureSequencer::updateCaptureRequest(const Parameters &params,
         sp<Camera2Client> &client) {
     ATRACE_CALL();
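
[Review note] The retry path bounded by kMaxRetryCount works roughly as follows
(condensed for illustration; in the patch the counter is updated in
onCaptureAvailable() and inspected in manageStandardCaptureWait()):

    // Sketch: choose the sequencer's next state once a capture completes or
    // errors out. kMaxRetryCount matches the constant added to the header.
    enum CaptureState { STANDARD_CAPTURE, DONE };

    CaptureState nextStateFor(bool captureError, int32_t& errorCnt) {
        static const int32_t kMaxRetryCount = 3;
        if (!captureError) {
            errorCnt = 0;                  // clean JPEG: reset and finish
            return DONE;
        }
        if (++errorCnt > kMaxRetryCount) {
            return DONE;                   // too many dropped buffers: give up
        }
        return STANDARD_CAPTURE;           // re-issue the still-capture request
    }
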
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index 10252fb..a7c61d2 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -34,8 +34,7 @@
 
 namespace camera2 {
 
-class ZslProcessorInterface;
-class BurstCapture;
+class ZslProcessor;
 
 /**
  * Manages the still image capture process for
@@ -49,7 +48,7 @@
     ~CaptureSequencer();
 
     // Get reference to the ZslProcessor, which holds the ZSL buffers and frames
-    void setZslProcessor(wp<ZslProcessorInterface> processor);
+    void setZslProcessor(wp<ZslProcessor> processor);
 
     // Begin still image capture
     status_t startCapture(int msgType);
@@ -70,7 +69,7 @@
     virtual void onResultAvailable(const CaptureResult &result);
 
     // Notifications from the JPEG processor
-    void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer);
+    void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer, bool captureError);
 
     void dump(int fd, const Vector<String16>& args);
 
@@ -95,6 +94,7 @@
     Condition mNewFrameSignal;
 
     bool mNewCaptureReceived;
+    int32_t mNewCaptureErrorCnt;
     nsecs_t mCaptureTimestamp;
     sp<MemoryBase> mCaptureBuffer;
     Condition mNewCaptureSignal;
@@ -111,10 +111,10 @@
     static const int kMaxTimeoutsForPrecaptureStart = 10; // 1 sec
     static const int kMaxTimeoutsForPrecaptureEnd = 20;  // 2 sec
     static const int kMaxTimeoutsForCaptureEnd    = 40;  // 4 sec
+    static const int kMaxRetryCount = 3; // 3 retries in case of buffer drop
 
     wp<Camera2Client> mClient;
-    wp<ZslProcessorInterface> mZslProcessor;
-    sp<BurstCapture> mBurstCapture;
+    wp<ZslProcessor> mZslProcessor;
 
     enum CaptureState {
         IDLE,
@@ -126,8 +126,6 @@
         STANDARD_PRECAPTURE_WAIT,
         STANDARD_CAPTURE,
         STANDARD_CAPTURE_WAIT,
-        BURST_CAPTURE_START,
-        BURST_CAPTURE_WAIT,
         DONE,
         ERROR,
         NUM_CAPTURE_STATES
@@ -165,9 +163,6 @@
     CaptureState manageStandardCapture(sp<Camera2Client> &client);
     CaptureState manageStandardCaptureWait(sp<Camera2Client> &client);
 
-    CaptureState manageBurstCaptureStart(sp<Camera2Client> &client);
-    CaptureState manageBurstCaptureWait(sp<Camera2Client> &client);
-
     CaptureState manageDone(sp<Camera2Client> &client);
 
     // Utility methods
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 40d53b3..4d12015 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -53,7 +53,13 @@
         // Check if lens is fixed-focus
         if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED) {
             m3aState.afMode = ANDROID_CONTROL_AF_MODE_OFF;
+        } else {
+            m3aState.afMode = ANDROID_CONTROL_AF_MODE_AUTO;
         }
+        m3aState.awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+        m3aState.aeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+        m3aState.afState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+        m3aState.awbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
     }
 }
 
@@ -170,7 +176,7 @@
 
         entry = frame.find(ANDROID_SCALER_CROP_REGION);
         if (entry.count < 4) {
-            ALOGE("%s: Camera %d: Unable to read crop region (count = %d)",
+            ALOGE("%s: Camera %d: Unable to read crop region (count = %zu)",
                     __FUNCTION__, client->getCameraId(), entry.count);
             return res;
         }
@@ -253,80 +259,99 @@
     if (frameNumber <= mLast3AFrameNumber) {
         ALOGV("%s: Already sent 3A for frame number %d, skipping",
                 __FUNCTION__, frameNumber);
+
+        // Remove any existing entry for this frame number from mPending3AStates.
+        mPending3AStates.removeItem(frameNumber);
         return OK;
     }
 
-    mLast3AFrameNumber = frameNumber;
+    AlgState pendingState;
 
-    // Get 3A states from result metadata
+    ssize_t index = mPending3AStates.indexOfKey(frameNumber);
+    if (index != NAME_NOT_FOUND) {
+        pendingState = mPending3AStates.valueAt(index);
+    }
+
+    // Update 3A states from the result.
     bool gotAllStates = true;
 
-    AlgState new3aState;
-
     // TODO: Also use AE mode, AE trigger ID
+    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
+            &pendingState.afMode, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
-            &new3aState.afMode, frameNumber, cameraId);
+    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
+            &pendingState.awbMode, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
-            &new3aState.awbMode, frameNumber, cameraId);
+    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
+            &pendingState.aeState, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
-            &new3aState.aeState, frameNumber, cameraId);
+    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
+            &pendingState.afState, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
-            &new3aState.afState, frameNumber, cameraId);
-
-    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
-            &new3aState.awbState, frameNumber, cameraId);
+    gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
+            &pendingState.awbState, frameNumber, cameraId);
 
     if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
-        new3aState.afTriggerId = frame.mResultExtras.afTriggerId;
-        new3aState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
+        pendingState.afTriggerId = frame.mResultExtras.afTriggerId;
+        pendingState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
     } else {
-        gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AF_TRIGGER_ID,
-                 &new3aState.afTriggerId, frameNumber, cameraId);
+        gotAllStates &= updatePendingState<int32_t>(metadata,
+                ANDROID_CONTROL_AF_TRIGGER_ID, &pendingState.afTriggerId, frameNumber, cameraId);
 
-        gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
-                 &new3aState.aeTriggerId, frameNumber, cameraId);
+        gotAllStates &= updatePendingState<int32_t>(metadata,
+            ANDROID_CONTROL_AE_PRECAPTURE_ID, &pendingState.aeTriggerId, frameNumber, cameraId);
     }
 
-    if (!gotAllStates) return BAD_VALUE;
+    if (!gotAllStates) {
+        // If not all states have been received yet, store the pending state in mPending3AStates.
+        if (index == NAME_NOT_FOUND) {
+            mPending3AStates.add(frameNumber, pendingState);
+        } else {
+            mPending3AStates.replaceValueAt(index, pendingState);
+        }
+        return NOT_ENOUGH_DATA;
+    }
 
-    if (new3aState.aeState != m3aState.aeState) {
+    // Once all 3A states are received, notify the client about 3A changes.
+    if (pendingState.aeState != m3aState.aeState) {
         ALOGV("%s: Camera %d: AE state %d->%d",
                 __FUNCTION__, cameraId,
-                m3aState.aeState, new3aState.aeState);
-        client->notifyAutoExposure(new3aState.aeState, new3aState.aeTriggerId);
+                m3aState.aeState, pendingState.aeState);
+        client->notifyAutoExposure(pendingState.aeState, pendingState.aeTriggerId);
     }
 
-    if (new3aState.afState != m3aState.afState ||
-        new3aState.afMode != m3aState.afMode ||
-        new3aState.afTriggerId != m3aState.afTriggerId) {
+    if (pendingState.afState != m3aState.afState ||
+        pendingState.afMode != m3aState.afMode ||
+        pendingState.afTriggerId != m3aState.afTriggerId) {
         ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
                 __FUNCTION__, cameraId,
-                m3aState.afState, new3aState.afState,
-                m3aState.afMode, new3aState.afMode,
-                m3aState.afTriggerId, new3aState.afTriggerId);
-        client->notifyAutoFocus(new3aState.afState, new3aState.afTriggerId);
+                m3aState.afState, pendingState.afState,
+                m3aState.afMode, pendingState.afMode,
+                m3aState.afTriggerId, pendingState.afTriggerId);
+        client->notifyAutoFocus(pendingState.afState, pendingState.afTriggerId);
     }
-    if (new3aState.awbState != m3aState.awbState ||
-        new3aState.awbMode != m3aState.awbMode) {
+    if (pendingState.awbState != m3aState.awbState ||
+        pendingState.awbMode != m3aState.awbMode) {
         ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
                 __FUNCTION__, cameraId,
-                m3aState.awbState, new3aState.awbState,
-                m3aState.awbMode, new3aState.awbMode);
-        client->notifyAutoWhitebalance(new3aState.awbState,
-                new3aState.aeTriggerId);
+                m3aState.awbState, pendingState.awbState,
+                m3aState.awbMode, pendingState.awbMode);
+        client->notifyAutoWhitebalance(pendingState.awbState,
+                pendingState.aeTriggerId);
     }
 
-    m3aState = new3aState;
+    if (index != NAME_NOT_FOUND) {
+        mPending3AStates.removeItemsAt(index);
+    }
+
+    m3aState = pendingState;
+    mLast3AFrameNumber = frameNumber;
 
     return OK;
 }
 
 template<typename Src, typename T>
-bool FrameProcessor::get3aResult(const CameraMetadata& result, int32_t tag,
+bool FrameProcessor::updatePendingState(const CameraMetadata& result, int32_t tag,
         T* value, int32_t frameNumber, int cameraId) {
     camera_metadata_ro_entry_t entry;
     if (value == NULL) {
@@ -335,9 +360,14 @@
         return false;
     }
 
+    // Already got the value for this tag.
+    if (*value != static_cast<T>(NOT_SET)) {
+        return true;
+    }
+
     entry = result.find(tag);
     if (entry.count == 0) {
-        ALOGE("%s: Camera %d: No %s provided by HAL for frame %d!",
+        ALOGV("%s: Camera %d: No %s provided by HAL for frame %d in this result!",
                 __FUNCTION__, cameraId,
                 get_camera_metadata_tag_name(tag), frameNumber);
         return false;
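
[Review note] The FrameProcessor change turns 3A handling from "one complete
result per frame" into an accumulator keyed by frame number: every field starts
at NOT_SET, each (possibly partial) result fills in only the tags it carries,
and the client is notified once all fields are present. A stand-alone model of
the pattern (simplified types, not the actual code):

    #include <cstdint>
    #include <map>

    static constexpr int32_t NOT_SET = -1;

    struct Pending3A {
        int32_t aeState = NOT_SET, afState = NOT_SET, awbState = NOT_SET;
        bool complete() const {
            return aeState != NOT_SET && afState != NOT_SET && awbState != NOT_SET;
        }
    };

    // Merge one partial result (pass NOT_SET for tags absent from this result);
    // returns true when the frame's 3A state became complete and was consumed.
    bool mergeResult(std::map<int32_t, Pending3A>& pending, int32_t frame,
                     int32_t ae, int32_t af, int32_t awb) {
        Pending3A& s = pending[frame];
        if (s.aeState == NOT_SET) s.aeState = ae;
        if (s.afState == NOT_SET) s.afState = af;
        if (s.awbState == NOT_SET) s.awbState = awb;
        if (!s.complete()) return false;  // park until the next partial result
        pending.erase(frame);             // all pieces arrived: notify, then drop
        return true;
    }
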
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 68cf55b..a5b81a7 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -43,6 +43,8 @@
     ~FrameProcessor();
 
   private:
+    static const int32_t NOT_SET = -1;
+
     wp<Camera2Client> mClient;
 
     bool mSynthesize3ANotify;
@@ -63,7 +65,7 @@
 
     // Helper for process3aState
     template<typename Src, typename T>
-    bool get3aResult(const CameraMetadata& result, int32_t tag, T* value,
+    bool updatePendingState(const CameraMetadata& result, int32_t tag, T* value,
             int32_t frameNumber, int cameraId);
 
 
@@ -81,15 +83,20 @@
 
         // These defaults need to match those in Parameters.cpp
         AlgState() :
-                afMode(ANDROID_CONTROL_AF_MODE_AUTO),
-                awbMode(ANDROID_CONTROL_AWB_MODE_AUTO),
-                aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
-                afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
-                awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE),
-                afTriggerId(0),
-                aeTriggerId(0) {
+                afMode((camera_metadata_enum_android_control_af_mode)NOT_SET),
+                awbMode((camera_metadata_enum_android_control_awb_mode)NOT_SET),
+                aeState((camera_metadata_enum_android_control_ae_state)NOT_SET),
+                afState((camera_metadata_enum_android_control_af_state)NOT_SET),
+                awbState((camera_metadata_enum_android_control_awb_state)NOT_SET),
+                afTriggerId(NOT_SET),
+                aeTriggerId(NOT_SET) {
         }
-    } m3aState;
+    };
+
+    AlgState m3aState;
+
+    // Maps frame number -> pending 3A state for which not all data have arrived yet.
+    KeyedVector<int32_t, AlgState> mPending3AStates;
 
     // Whether the partial result is enabled for this device
     bool mUsePartialResult;
diff --git a/services/camera/libcameraservice/api1/client2/JpegCompressor.h b/services/camera/libcameraservice/api1/client2/JpegCompressor.h
index 945b1de..df5da54 100644
--- a/services/camera/libcameraservice/api1/client2/JpegCompressor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.h
@@ -71,7 +71,6 @@
     Vector<CpuConsumer::LockedBuffer*> mBuffers;
     CpuConsumer::LockedBuffer *mJpegBuffer;
     CpuConsumer::LockedBuffer *mAuxBuffer;
-    bool mFoundJpeg, mFoundAux;
 
     jpeg_compress_struct mCInfo;
 
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index bd9786f..ffe96fc 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -42,7 +42,8 @@
         mDevice(client->getCameraDevice()),
         mSequencer(sequencer),
         mId(client->getCameraId()),
-        mCaptureAvailable(false),
+        mCaptureDone(false),
+        mCaptureSuccess(false),
         mCaptureStreamId(NO_STREAM) {
 }
 
@@ -53,9 +54,26 @@
 
 void JpegProcessor::onFrameAvailable(const BufferItem& /*item*/) {
     Mutex::Autolock l(mInputMutex);
-    if (!mCaptureAvailable) {
-        mCaptureAvailable = true;
-        mCaptureAvailableSignal.signal();
+    ALOGV("%s", __FUNCTION__);
+    if (!mCaptureDone) {
+        mCaptureDone = true;
+        mCaptureSuccess = true;
+        mCaptureDoneSignal.signal();
+    }
+}
+
+void JpegProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
+    // Intentionally left empty
+}
+
+void JpegProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
+    Mutex::Autolock l(mInputMutex);
+    ALOGV("%s", __FUNCTION__);
+
+    if (bufferInfo.mError) {
+        mCaptureDone = true;
+        mCaptureSuccess = false;
+        mCaptureDoneSignal.signal();
     }
 }
 
@@ -108,7 +126,7 @@
             return NO_MEMORY;
         }
     }
-    ALOGV("%s: Camera %d: JPEG capture heap now %d bytes; requested %d bytes",
+    ALOGV("%s: Camera %d: JPEG capture heap now %zu bytes; requested %zd bytes",
             __FUNCTION__, mId, mCaptureHeap->getSize(), maxJpegSize);
 
     if (mCaptureStreamId != NO_STREAM) {
@@ -145,7 +163,7 @@
         // Create stream for HAL production
         res = device->createStream(mCaptureWindow,
                 params.pictureWidth, params.pictureHeight,
-                HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_JFIF,
+                HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_V0_JFIF,
                 CAMERA3_STREAM_ROTATION_0, &mCaptureStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for capture: "
@@ -154,6 +172,12 @@
             return res;
         }
 
+        res = device->addBufferListenerForStream(mCaptureStreamId, this);
+        if (res != OK) {
+              ALOGE("%s: Camera %d: Can't add buffer listeneri: %s (%d)",
+                    __FUNCTION__, mId, strerror(-res), res);
+              return res;
+        }
     }
     return OK;
 }
@@ -192,24 +216,26 @@
 bool JpegProcessor::threadLoop() {
     status_t res;
 
+    bool captureSuccess = false;
     {
         Mutex::Autolock l(mInputMutex);
-        while (!mCaptureAvailable) {
-            res = mCaptureAvailableSignal.waitRelative(mInputMutex,
+
+        while (!mCaptureDone) {
+            res = mCaptureDoneSignal.waitRelative(mInputMutex,
                     kWaitDuration);
             if (res == TIMED_OUT) return true;
         }
-        mCaptureAvailable = false;
+
+        captureSuccess = mCaptureSuccess;
+        mCaptureDone = false;
     }
 
-    do {
-        res = processNewCapture();
-    } while (res == OK);
+    res = processNewCapture(captureSuccess);
 
     return true;
 }
 
-status_t JpegProcessor::processNewCapture() {
+status_t JpegProcessor::processNewCapture(bool captureSuccess) {
     ATRACE_CALL();
     status_t res;
     sp<Camera2Heap> captureHeap;
@@ -217,7 +243,7 @@
 
     CpuConsumer::LockedBuffer imgBuffer;
 
-    {
+    if (captureSuccess) {
         Mutex::Autolock l(mInputMutex);
         if (mCaptureStreamId == NO_STREAM) {
             ALOGW("%s: Camera %d: No stream is available", __FUNCTION__, mId);
@@ -269,7 +295,7 @@
 
     sp<CaptureSequencer> sequencer = mSequencer.promote();
     if (sequencer != 0) {
-        sequencer->onCaptureAvailable(imgBuffer.timestamp, captureBuffer);
+        sequencer->onCaptureAvailable(imgBuffer.timestamp, captureBuffer, !captureSuccess);
     }
 
     return OK;
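
[Review note] JpegProcessor now has two producers for one condition: a
successful frame (onFrameAvailable) and a dropped buffer (onBufferReleased with
bufferInfo.mError set). Both mark the capture done and record success or
failure; threadLoop() consumes exactly one outcome per wakeup. A stand-alone
model of that handshake (std:: primitives instead of Mutex/Condition, purely
illustrative):

    #include <condition_variable>
    #include <mutex>

    struct CaptureSignal {
        std::mutex m;
        std::condition_variable cv;
        bool done = false, success = false;

        void onFrame() { post(true);  }    // JPEG landed in the consumer
        void onError() { post(false); }    // HAL dropped the output buffer

        bool wait() {                      // returns whether capture succeeded
            std::unique_lock<std::mutex> l(m);
            cv.wait(l, [this] { return done; });
            done = false;
            return success;
        }

    private:
        void post(bool ok) {
            std::lock_guard<std::mutex> l(m);
            if (!done) { done = true; success = ok; cv.notify_one(); }
        }
    };
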
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index fbdae11..7187ad9 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -35,13 +35,14 @@
 namespace camera2 {
 
 class CaptureSequencer;
-class Parameters;
+struct Parameters;
 
 /***
  * Still image capture output image processing
  */
 class JpegProcessor:
-            public Thread, public CpuConsumer::FrameAvailableListener {
+            public Thread, public CpuConsumer::FrameAvailableListener,
+            public camera3::Camera3StreamBufferListener {
   public:
     JpegProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
     ~JpegProcessor();
@@ -49,6 +50,10 @@
     // CpuConsumer listener implementation
     void onFrameAvailable(const BufferItem& item);
 
+    // Camera3StreamBufferListener implementation
+    void onBufferAcquired(const BufferInfo& bufferInfo) override;
+    void onBufferReleased(const BufferInfo& bufferInfo) override;
+
     status_t updateStream(const Parameters &params);
     status_t deleteStream();
     int getStreamId() const;
@@ -61,8 +66,9 @@
     int mId;
 
     mutable Mutex mInputMutex;
-    bool mCaptureAvailable;
-    Condition mCaptureAvailableSignal;
+    bool mCaptureDone;
+    bool mCaptureSuccess;
+    Condition mCaptureDoneSignal;
 
     enum {
         NO_STREAM = -1
@@ -75,7 +81,7 @@
 
     virtual bool threadLoop();
 
-    status_t processNewCapture();
+    status_t processNewCapture(bool captureSuccess);
     size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
 
 };
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 44447b4..5779176 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -30,6 +30,7 @@
 #include "Parameters.h"
 #include "system/camera.h"
 #include "hardware/camera_common.h"
+#include <android/hardware/ICamera.h>
 #include <media/MediaProfiles.h>
 #include <media/mediarecorder.h>
 
@@ -870,8 +871,9 @@
     }
 
     // Set up initial state for non-Camera.Parameters state variables
-
-    storeMetadataInBuffers = true;
+    videoFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    videoDataSpace = HAL_DATASPACE_V0_BT709;
+    videoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV;
     playShutterSound = true;
     enableFaceDetect = false;
 
@@ -913,8 +915,6 @@
 
     ALOGI("%s: zslMode: %d slowJpegMode %d", __FUNCTION__, zslMode, slowJpegMode);
 
-    lightFx = LIGHTFX_NONE;
-
     state = STOPPED;
 
     paramsFlattened = params.flatten();
@@ -1040,7 +1040,7 @@
             ALOGE("%s: Camera %d: Scene mode override list is an "
                     "unexpected size: %zu (expected %zu)", __FUNCTION__,
                     cameraId, sceneModeOverrides.count,
-                    availableSceneModes.count);
+                    availableSceneModes.count * kModesPerSceneMode);
             return NO_INIT;
         }
         for (size_t i = 0; i < availableSceneModes.count; i++) {
@@ -1864,10 +1864,6 @@
         ALOGE("%s: Video stabilization not supported", __FUNCTION__);
     }
 
-    // LIGHTFX
-    validatedParams.lightFx = lightFxStringToEnum(
-        newParams.get(CameraParameters::KEY_LIGHTFX));
-
     /** Update internal parameters */
 
     *this = validatedParams;
@@ -1959,7 +1955,7 @@
     if (res != OK) return res;
 
     // android.hardware.Camera requires that when face detect is enabled, the
-    // camera is in a face-priority mode. HAL2 splits this into separate parts
+    // camera is in a face-priority mode. HAL3.x splits this into separate parts
     // (face detection statistics and face priority scene mode). Map from one
     // to the other.
     bool sceneModeActive =
@@ -2501,18 +2497,6 @@
     }
 }
 
-Parameters::Parameters::lightFxMode_t Parameters::lightFxStringToEnum(
-        const char *lightFxMode) {
-    return
-        !lightFxMode ?
-            Parameters::LIGHTFX_NONE :
-        !strcmp(lightFxMode, CameraParameters::LIGHTFX_LOWLIGHT) ?
-            Parameters::LIGHTFX_LOWLIGHT :
-        !strcmp(lightFxMode, CameraParameters::LIGHTFX_HDR) ?
-            Parameters::LIGHTFX_HDR :
-        Parameters::LIGHTFX_NONE;
-}
-
 status_t Parameters::parseAreas(const char *areasCStr,
         Vector<Parameters::Area> *areas) {
     static const size_t NUM_FIELDS = 5;
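
[Review note] Replacing the storeMetadataInBuffers boolean with videoBufferMode
is what makes the new Surface-based recording path expressible. Per the
android/hardware/ICamera.h interface introduced by this change, the mode is one
of:

    //   VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV       (0)  YUV frames copied via IMemory
    //   VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA  (1)  per-frame metadata/handles via IMemory
    //   VIDEO_BUFFER_MODE_BUFFER_QUEUE            (2)  frames queued straight to a Surface

The default above stays at the YUV callback mode until the client opts in via
setVideoBufferMode().
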
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 972d007..c437722 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -131,23 +131,19 @@
 
     int zoom;
 
-    int videoWidth, videoHeight;
+    int videoWidth, videoHeight, videoFormat;
+    android_dataspace videoDataSpace;
 
     bool recordingHint;
     bool videoStabilization;
 
-    enum lightFxMode_t {
-        LIGHTFX_NONE = 0,
-        LIGHTFX_LOWLIGHT,
-        LIGHTFX_HDR
-    } lightFx;
-
     CameraParameters2 params;
     String8 paramsFlattened;
 
     // These parameters are also part of the camera API-visible state, but not
     // directly listed in Camera.Parameters
-    bool storeMetadataInBuffers;
+    // One of ICamera::VIDEO_BUFFER_MODE_*
+    int32_t videoBufferMode;
     bool playShutterSound;
     bool enableFaceDetect;
 
@@ -307,7 +303,6 @@
     static const char* flashModeEnumToString(flashMode_t flashMode);
     static focusMode_t focusModeStringToEnum(const char *focusMode);
     static const char* focusModeEnumToString(focusMode_t focusMode);
-    static lightFxMode_t lightFxStringToEnum(const char *lightFxMode);
 
     static status_t parseAreas(const char *areasCStr,
             Vector<Area> *areas);
@@ -330,7 +325,7 @@
     static const int kFpsToApiScale = 1000;
 
     // Transform from (-1000,-1000)-(1000,1000) normalized coords from camera
-    // API to HAL2 (0,0)-(activePixelArray.width/height) coordinates
+    // API to HAL3 (0,0)-(activePixelArray.width/height) coordinates
     int normalizedXToArray(int x) const;
     int normalizedYToArray(int y) const;
 
@@ -350,7 +345,7 @@
 private:
 
     // Convert from viewfinder crop-region relative array coordinates
-    // to HAL2 sensor array coordinates
+    // to HAL3 sensor array coordinates
     int cropXToArray(int x) const;
     int cropYToArray(int y) const;
 
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 9e6c0db..a2c9712 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -30,7 +30,6 @@
 #include <utils/Trace.h>
 #include <gui/BufferItem.h>
 #include <gui/Surface.h>
-#include <camera/ICameraRecordingProxy.h>
 #include <media/hardware/HardwareAPI.h>
 
 #include "common/CameraDeviceBase.h"
@@ -50,13 +49,7 @@
         mPreviewRequestId(Camera2Client::kPreviewRequestIdStart),
         mPreviewStreamId(NO_STREAM),
         mRecordingRequestId(Camera2Client::kRecordingRequestIdStart),
-        mRecordingStreamId(NO_STREAM),
-        mRecordingFrameAvailable(false),
-        mRecordingHeapCount(kDefaultRecordingHeapCount),
-        mRecordingHeapFree(kDefaultRecordingHeapCount),
-        mRecordingFormat(kDefaultRecordingFormat),
-        mRecordingDataSpace(kDefaultRecordingDataSpace),
-        mRecordingGrallocUsage(kDefaultRecordingGrallocUsage)
+        mRecordingStreamId(NO_STREAM)
 {
 }
 
@@ -79,11 +72,30 @@
     return OK;
 }
 
+status_t StreamingProcessor::setRecordingWindow(sp<Surface> window) {
+    ATRACE_CALL();
+    status_t res;
+
+    res = deleteRecordingStream();
+    if (res != OK) return res;
+
+    Mutex::Autolock m(mMutex);
+
+    mRecordingWindow = window;
+
+    return OK;
+}
+
 bool StreamingProcessor::haveValidPreviewWindow() const {
     Mutex::Autolock m(mMutex);
     return mPreviewWindow != 0;
 }
 
+bool StreamingProcessor::haveValidRecordingWindow() const {
+    Mutex::Autolock m(mMutex);
+    return mRecordingWindow != nullptr;
+}
+
 status_t StreamingProcessor::updatePreviewRequest(const Parameters &params) {
     ATRACE_CALL();
     status_t res;
@@ -245,86 +257,6 @@
     return mPreviewStreamId;
 }
 
-status_t StreamingProcessor::setRecordingBufferCount(size_t count) {
-    ATRACE_CALL();
-    // Make sure we can support this many buffer slots
-    if (count > BufferQueue::NUM_BUFFER_SLOTS) {
-        ALOGE("%s: Camera %d: Too many recording buffers requested: %zu, max %d",
-                __FUNCTION__, mId, count, BufferQueue::NUM_BUFFER_SLOTS);
-        return BAD_VALUE;
-    }
-
-    Mutex::Autolock m(mMutex);
-
-    ALOGV("%s: Camera %d: New recording buffer count from encoder: %zu",
-            __FUNCTION__, mId, count);
-
-    // Need to re-size consumer and heap
-    if (mRecordingHeapCount != count) {
-        ALOGV("%s: Camera %d: Resetting recording heap and consumer",
-            __FUNCTION__, mId);
-
-        if (isStreamActive(mActiveStreamIds, mRecordingStreamId)) {
-            ALOGE("%s: Camera %d: Setting recording buffer count when "
-                    "recording stream is already active!", __FUNCTION__,
-                    mId);
-            return INVALID_OPERATION;
-        }
-
-        releaseAllRecordingFramesLocked();
-
-        if (mRecordingHeap != 0) {
-            mRecordingHeap.clear();
-        }
-        mRecordingHeapCount = count;
-        mRecordingHeapFree = count;
-
-        mRecordingConsumer.clear();
-    }
-
-    return OK;
-}
-
-status_t StreamingProcessor::setRecordingFormat(int format,
-        android_dataspace dataSpace) {
-    ATRACE_CALL();
-
-    Mutex::Autolock m(mMutex);
-
-    ALOGV("%s: Camera %d: New recording format/dataspace from encoder: %X, %X",
-            __FUNCTION__, mId, format, dataSpace);
-
-    mRecordingFormat = format;
-    mRecordingDataSpace = dataSpace;
-    int prevGrallocUsage = mRecordingGrallocUsage;
-    if (mRecordingFormat == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
-        mRecordingGrallocUsage = GRALLOC_USAGE_HW_VIDEO_ENCODER;
-    } else {
-        mRecordingGrallocUsage = GRALLOC_USAGE_SW_READ_OFTEN;
-    }
-
-    ALOGV("%s: Camera %d: New recording gralloc usage: %08X", __FUNCTION__, mId,
-            mRecordingGrallocUsage);
-
-    if (prevGrallocUsage != mRecordingGrallocUsage) {
-        ALOGV("%s: Camera %d: Resetting recording consumer for new usage",
-            __FUNCTION__, mId);
-
-        if (isStreamActive(mActiveStreamIds, mRecordingStreamId)) {
-            ALOGE("%s: Camera %d: Changing recording format when "
-                    "recording stream is already active!", __FUNCTION__,
-                    mId);
-            return INVALID_OPERATION;
-        }
-
-        releaseAllRecordingFramesLocked();
-
-        mRecordingConsumer.clear();
-    }
-
-    return OK;
-}
-
 status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
     ATRACE_CALL();
     status_t res;
@@ -396,12 +328,13 @@
         return res;
     }
 
-    if (mRecordingConsumer == 0 ||
+    if (mRecordingWindow == nullptr ||
             currentWidth != (uint32_t)params.videoWidth ||
             currentHeight != (uint32_t)params.videoHeight ||
-            currentFormat != (uint32_t)mRecordingFormat ||
-            currentDataSpace != mRecordingDataSpace) {
+            currentFormat != (uint32_t)params.videoFormat ||
+            currentDataSpace != params.videoDataSpace) {
         *needsUpdate = true;
+        return res;
     }
     *needsUpdate = false;
     return res;
@@ -418,26 +351,6 @@
         return INVALID_OPERATION;
     }
 
-    bool newConsumer = false;
-    if (mRecordingConsumer == 0) {
-        ALOGV("%s: Camera %d: Creating recording consumer with %zu + 1 "
-                "consumer-side buffers", __FUNCTION__, mId, mRecordingHeapCount);
-        // Create CPU buffer queue endpoint. We need one more buffer here so that we can
-        // always acquire and free a buffer when the heap is full; otherwise the consumer
-        // will have buffers in flight we'll never clear out.
-        sp<IGraphicBufferProducer> producer;
-        sp<IGraphicBufferConsumer> consumer;
-        BufferQueue::createBufferQueue(&producer, &consumer);
-        mRecordingConsumer = new BufferItemConsumer(consumer,
-                mRecordingGrallocUsage,
-                mRecordingHeapCount + 1);
-        mRecordingConsumer->setFrameAvailableListener(this);
-        mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
-        mRecordingWindow = new Surface(producer);
-        newConsumer = true;
-        // Allocate memory later, since we don't know buffer size until receipt
-    }
-
     if (mRecordingStreamId != NO_STREAM) {
         // Check if stream parameters have to change
         uint32_t currentWidth, currentHeight;
@@ -454,9 +367,8 @@
         }
         if (currentWidth != (uint32_t)params.videoWidth ||
                 currentHeight != (uint32_t)params.videoHeight ||
-                currentFormat != (uint32_t)mRecordingFormat ||
-                currentDataSpace != mRecordingDataSpace ||
-                newConsumer) {
+                currentFormat != (uint32_t)params.videoFormat ||
+                currentDataSpace != params.videoDataSpace) {
             // TODO: Should wait to be sure previous recording has finished
             res = device->deleteStream(mRecordingStreamId);
 
@@ -476,10 +388,9 @@
     }
 
     if (mRecordingStreamId == NO_STREAM) {
-        mRecordingFrameCount = 0;
         res = device->createStream(mRecordingWindow,
                 params.videoWidth, params.videoHeight,
-                mRecordingFormat, mRecordingDataSpace,
+                params.videoFormat, params.videoDataSpace,
                 CAMERA3_STREAM_ROTATION_0, &mRecordingStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for recording: "
@@ -543,20 +454,6 @@
 
     Mutex::Autolock m(mMutex);
 
-    // If a recording stream is being started up and no recording
-    // stream is active yet, free up any outstanding buffers left
-    // from the previous recording session. There should never be
-    // any, so if there are, warn about it.
-    bool isRecordingStreamIdle = !isStreamActive(mActiveStreamIds, mRecordingStreamId);
-    bool startRecordingStream = isStreamActive(outputStreams, mRecordingStreamId);
-    if (startRecordingStream && isRecordingStreamIdle) {
-        releaseAllRecordingFramesLocked();
-    }
-
-    ALOGV("%s: Camera %d: %s started, recording heap has %zu free of %zu",
-            __FUNCTION__, mId, (type == PREVIEW) ? "preview" : "recording",
-            mRecordingHeapFree, mRecordingHeapCount);
-
     CameraMetadata &request = (type == PREVIEW) ?
             mPreviewRequest : mRecordingRequest;
 
@@ -693,279 +590,6 @@
     return OK;
 }
 
-void StreamingProcessor::onFrameAvailable(const BufferItem& /*item*/) {
-    ATRACE_CALL();
-    Mutex::Autolock l(mMutex);
-    if (!mRecordingFrameAvailable) {
-        mRecordingFrameAvailable = true;
-        mRecordingFrameAvailableSignal.signal();
-    }
-
-}
-
-bool StreamingProcessor::threadLoop() {
-    status_t res;
-
-    {
-        Mutex::Autolock l(mMutex);
-        while (!mRecordingFrameAvailable) {
-            res = mRecordingFrameAvailableSignal.waitRelative(
-                mMutex, kWaitDuration);
-            if (res == TIMED_OUT) return true;
-        }
-        mRecordingFrameAvailable = false;
-    }
-
-    do {
-        res = processRecordingFrame();
-    } while (res == OK);
-
-    return true;
-}
-
-status_t StreamingProcessor::processRecordingFrame() {
-    ATRACE_CALL();
-    status_t res;
-    sp<Camera2Heap> recordingHeap;
-    size_t heapIdx = 0;
-    nsecs_t timestamp;
-
-    sp<Camera2Client> client = mClient.promote();
-    if (client == 0) {
-        // Discard frames during shutdown
-        BufferItem imgBuffer;
-        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
-        if (res != OK) {
-            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
-                ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
-                        __FUNCTION__, mId, strerror(-res), res);
-            }
-            return res;
-        }
-        mRecordingConsumer->releaseBuffer(imgBuffer);
-        return OK;
-    }
-
-    {
-        /* acquire SharedParameters before mMutex so we don't dead lock
-            with Camera2Client code calling into StreamingProcessor */
-        SharedParameters::Lock l(client->getParameters());
-        Mutex::Autolock m(mMutex);
-        BufferItem imgBuffer;
-        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
-        if (res != OK) {
-            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
-                ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
-                        __FUNCTION__, mId, strerror(-res), res);
-            }
-            return res;
-        }
-        timestamp = imgBuffer.mTimestamp;
-
-        mRecordingFrameCount++;
-        ALOGVV("OnRecordingFrame: Frame %d", mRecordingFrameCount);
-
-        if (l.mParameters.state != Parameters::RECORD &&
-                l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
-            ALOGV("%s: Camera %d: Discarding recording image buffers "
-                    "received after recording done", __FUNCTION__,
-                    mId);
-            mRecordingConsumer->releaseBuffer(imgBuffer);
-            return INVALID_OPERATION;
-        }
-
-        if (mRecordingHeap == 0) {
-            size_t payloadSize = sizeof(VideoNativeMetadata);
-            ALOGV("%s: Camera %d: Creating recording heap with %zu buffers of "
-                    "size %zu bytes", __FUNCTION__, mId,
-                    mRecordingHeapCount, payloadSize);
-
-            mRecordingHeap = new Camera2Heap(payloadSize, mRecordingHeapCount,
-                    "Camera2Client::RecordingHeap");
-            if (mRecordingHeap->mHeap->getSize() == 0) {
-                ALOGE("%s: Camera %d: Unable to allocate memory for recording",
-                        __FUNCTION__, mId);
-                mRecordingConsumer->releaseBuffer(imgBuffer);
-                return NO_MEMORY;
-            }
-            for (size_t i = 0; i < mRecordingBuffers.size(); i++) {
-                if (mRecordingBuffers[i].mBuf !=
-                        BufferItemConsumer::INVALID_BUFFER_SLOT) {
-                    ALOGE("%s: Camera %d: Non-empty recording buffers list!",
-                            __FUNCTION__, mId);
-                }
-            }
-            mRecordingBuffers.clear();
-            mRecordingBuffers.setCapacity(mRecordingHeapCount);
-            mRecordingBuffers.insertAt(0, mRecordingHeapCount);
-
-            mRecordingHeapHead = 0;
-            mRecordingHeapFree = mRecordingHeapCount;
-        }
-
-        if (mRecordingHeapFree == 0) {
-            ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
-                    __FUNCTION__, mId);
-            mRecordingConsumer->releaseBuffer(imgBuffer);
-            return NO_MEMORY;
-        }
-
-        heapIdx = mRecordingHeapHead;
-        mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount;
-        mRecordingHeapFree--;
-
-        ALOGVV("%s: Camera %d: Timestamp %lld",
-                __FUNCTION__, mId, timestamp);
-
-        ssize_t offset;
-        size_t size;
-        sp<IMemoryHeap> heap =
-                mRecordingHeap->mBuffers[heapIdx]->getMemory(&offset,
-                        &size);
-
-        VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
-            (uint8_t*)heap->getBase() + offset);
-        payload->eType = kMetadataBufferTypeANWBuffer;
-        payload->pBuffer = imgBuffer.mGraphicBuffer->getNativeBuffer();
-        // b/28466701
-        payload->pBuffer = (ANativeWindowBuffer*)((uint8_t*)payload->pBuffer -
-                ICameraRecordingProxy::getCommonBaseAddress());
-        payload->nFenceFd = -1;
-
-        ALOGVV("%s: Camera %d: Sending out ANWBuffer %p",
-                __FUNCTION__, mId, payload->pBuffer);
-
-        mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
-        recordingHeap = mRecordingHeap;
-    }
-
-    // Call outside locked parameters to allow re-entrancy from notification
-    Camera2Client::SharedCameraCallbacks::Lock l(client->mSharedCameraCallbacks);
-    if (l.mRemoteCallback != 0) {
-        l.mRemoteCallback->dataCallbackTimestamp(timestamp,
-                CAMERA_MSG_VIDEO_FRAME,
-                recordingHeap->mBuffers[heapIdx]);
-    } else {
-        ALOGW("%s: Camera %d: Remote callback gone", __FUNCTION__, mId);
-    }
-
-    return OK;
-}
-
-void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
-    ATRACE_CALL();
-    status_t res;
-
-    Mutex::Autolock m(mMutex);
-    // Make sure this is for the current heap
-    ssize_t offset;
-    size_t size;
-    sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
-    if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) {
-        ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release "
-                "(got %x, expected %x)", __FUNCTION__, mId,
-                heap->getHeapID(), mRecordingHeap->mHeap->getHeapID());
-        return;
-    }
-
-    VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
-        (uint8_t*)heap->getBase() + offset);
-
-    if (payload->eType != kMetadataBufferTypeANWBuffer) {
-        ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)",
-                __FUNCTION__, mId, payload->eType,
-                kMetadataBufferTypeANWBuffer);
-        return;
-    }
-
-    // b/28466701
-    payload->pBuffer = (ANativeWindowBuffer*)(((uint8_t*)payload->pBuffer) +
-            ICameraRecordingProxy::getCommonBaseAddress());
-
-    // Release the buffer back to the recording queue
-    size_t itemIndex;
-    for (itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
-        const BufferItem item = mRecordingBuffers[itemIndex];
-        if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT &&
-                item.mGraphicBuffer->getNativeBuffer() == payload->pBuffer) {
-                break;
-        }
-    }
-
-    if (itemIndex == mRecordingBuffers.size()) {
-        ALOGE("%s: Camera %d: Can't find returned ANW Buffer %p in list of "
-                "outstanding buffers", __FUNCTION__, mId,
-                payload->pBuffer);
-        return;
-    }
-
-    ALOGVV("%s: Camera %d: Freeing returned ANW buffer %p index %d", __FUNCTION__,
-            mId, payload->pBuffer, itemIndex);
-
-    res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to free recording frame "
-                "(Returned ANW buffer: %p): %s (%d)", __FUNCTION__,
-                mId, payload->pBuffer, strerror(-res), res);
-        return;
-    }
-    mRecordingBuffers.replaceAt(itemIndex);
-
-    mRecordingHeapFree++;
-    ALOGV_IF(mRecordingHeapFree == mRecordingHeapCount,
-            "%s: Camera %d: All %d recording buffers returned",
-            __FUNCTION__, mId, mRecordingHeapCount);
-}
-
-void StreamingProcessor::releaseAllRecordingFramesLocked() {
-    ATRACE_CALL();
-    status_t res;
-
-    if (mRecordingConsumer == 0) {
-        return;
-    }
-
-    ALOGV("%s: Camera %d: Releasing all recording buffers", __FUNCTION__,
-            mId);
-
-    size_t releasedCount = 0;
-    for (size_t itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
-        const BufferItem item = mRecordingBuffers[itemIndex];
-        if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT) {
-            res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to free recording frame "
-                        "(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
-                        mId, item.mGraphicBuffer->handle, strerror(-res), res);
-            }
-            mRecordingBuffers.replaceAt(itemIndex);
-            releasedCount++;
-        }
-    }
-
-    if (releasedCount > 0) {
-        ALOGW("%s: Camera %d: Force-freed %zu outstanding buffers "
-                "from previous recording session", __FUNCTION__, mId, releasedCount);
-        ALOGE_IF(releasedCount != mRecordingHeapCount - mRecordingHeapFree,
-            "%s: Camera %d: Force-freed %zu buffers, but expected %zu",
-            __FUNCTION__, mId, releasedCount, mRecordingHeapCount - mRecordingHeapFree);
-    }
-
-    mRecordingHeapHead = 0;
-    mRecordingHeapFree = mRecordingHeapCount;
-}
-
-bool StreamingProcessor::isStreamActive(const Vector<int32_t> &streams,
-        int32_t recordingStreamId) {
-    for (size_t i = 0; i < streams.size(); i++) {
-        if (streams[i] == recordingStreamId) {
-            return true;
-        }
-    }
-    return false;
-}
-
-
 status_t StreamingProcessor::dump(int fd, const Vector<String16>& /*args*/) {
     String8 result;
 
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
index e0cad3a..57e6389 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
@@ -31,30 +31,28 @@
 
 namespace camera2 {
 
-class Parameters;
+struct Parameters;
 class Camera2Heap;
 
 /**
  * Management and processing for preview and recording streams
  */
-class StreamingProcessor:
-            public Thread, public BufferItemConsumer::FrameAvailableListener {
+class StreamingProcessor : public virtual VirtualLightRefBase {
   public:
     StreamingProcessor(sp<Camera2Client> client);
     ~StreamingProcessor();
 
     status_t setPreviewWindow(sp<Surface> window);
+    status_t setRecordingWindow(sp<Surface> window);
 
     bool haveValidPreviewWindow() const;
+    bool haveValidRecordingWindow() const;
 
     status_t updatePreviewRequest(const Parameters &params);
     status_t updatePreviewStream(const Parameters &params);
     status_t deletePreviewStream();
     int getPreviewStreamId() const;
 
-    status_t setRecordingBufferCount(size_t count);
-    status_t setRecordingFormat(int format, android_dataspace_t dataspace);
-
     status_t updateRecordingRequest(const Parameters &params);
     // If needsUpdate is set to true, a updateRecordingStream call with params will recreate
     // recording stream
@@ -81,11 +79,6 @@
     status_t getActiveRequestId() const;
     status_t incrementStreamingIds();
 
-    // Callback for new recording frames from HAL
-    virtual void onFrameAvailable(const BufferItem& item);
-    // Callback from stagefright which returns used recording frames
-    void releaseRecordingFrame(const sp<IMemory>& mem);
-
     status_t dump(int fd, const Vector<String16>& args);
 
   private:
@@ -110,47 +103,10 @@
     CameraMetadata mPreviewRequest;
     sp<Surface> mPreviewWindow;
 
-    // Recording-related members
-    static const nsecs_t kWaitDuration = 50000000; // 50 ms
-
     int32_t mRecordingRequestId;
     int mRecordingStreamId;
-    int mRecordingFrameCount;
-    sp<BufferItemConsumer> mRecordingConsumer;
     sp<Surface>  mRecordingWindow;
     CameraMetadata mRecordingRequest;
-    sp<camera2::Camera2Heap> mRecordingHeap;
-
-    bool mRecordingFrameAvailable;
-    Condition mRecordingFrameAvailableSignal;
-
-    static const size_t kDefaultRecordingHeapCount = 8;
-    size_t mRecordingHeapCount;
-    Vector<BufferItem> mRecordingBuffers;
-    size_t mRecordingHeapHead, mRecordingHeapFree;
-
-    static const int kDefaultRecordingFormat =
-            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
-    int mRecordingFormat;
-
-    static const android_dataspace kDefaultRecordingDataSpace =
-            HAL_DATASPACE_BT709;
-    android_dataspace mRecordingDataSpace;
-
-    static const int kDefaultRecordingGrallocUsage =
-            GRALLOC_USAGE_HW_VIDEO_ENCODER;
-    int mRecordingGrallocUsage;
-
-    virtual bool threadLoop();
-
-    status_t processRecordingFrame();
-
-    // Unilaterally free any buffers still outstanding to stagefright
-    void releaseAllRecordingFramesLocked();
-
-    // Determine if the specified stream is currently in use
-    static bool isStreamActive(const Vector<int32_t> &streams,
-            int32_t recordingStreamId);
 };
 
 
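The header change above removes StreamingProcessor's private recording machinery (the BufferItemConsumer, recording heap, and frame callbacks) and makes recording mirror preview: the client hands in an output window via setRecordingWindow(), and the processor only validates and uses it. A minimal sketch of that symmetric contract, using std::shared_ptr and a stand-in Window type in place of sp<Surface> (names are illustrative, not the real classes):

    // Sketch only: Window stands in for android::Surface, std::shared_ptr for sp<>.
    #include <cstdio>
    #include <memory>
    #include <utility>

    struct Window {};  // stand-in for an output surface

    class StreamProcessorSketch {
      public:
        // Preview and recording now follow the same pattern: the client owns
        // the consumer end and just hands the processor a window to draw into.
        void setPreviewWindow(std::shared_ptr<Window> w)   { mPreview = std::move(w); }
        void setRecordingWindow(std::shared_ptr<Window> w) { mRecording = std::move(w); }

        bool haveValidPreviewWindow() const   { return mPreview != nullptr; }
        bool haveValidRecordingWindow() const { return mRecording != nullptr; }

      private:
        std::shared_ptr<Window> mPreview;
        std::shared_ptr<Window> mRecording;
    };

    int main() {
        StreamProcessorSketch proc;
        proc.setRecordingWindow(std::make_shared<Window>());
        std::printf("recording window valid: %d\n", proc.haveValidRecordingWindow());
        return 0;
    }
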
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 0b79b31..b127472 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@
 #ifdef LOG_NNDEBUG
 #define ALOGVV(...) ALOGV(__VA_ARGS__)
 #else
-#define ALOGVV(...) ((void)0)
+#define ALOGVV(...) if (0) ALOGV(__VA_ARGS__)
 #endif
 
 #include <inttypes.h>
@@ -35,6 +35,7 @@
 #include "api1/Camera2Client.h"
 #include "api1/client2/CaptureSequencer.h"
 #include "api1/client2/ZslProcessor.h"
+#include "device3/Camera3Device.h"
 
 namespace android {
 namespace camera2 {
@@ -43,35 +44,55 @@
     sp<Camera2Client> client,
     wp<CaptureSequencer> sequencer):
         Thread(false),
+        mLatestClearedBufferTimestamp(0),
         mState(RUNNING),
         mClient(client),
-        mDevice(client->getCameraDevice()),
         mSequencer(sequencer),
         mId(client->getCameraId()),
-        mDeleted(false),
-        mZslBufferAvailable(false),
         mZslStreamId(NO_STREAM),
-        mZslReprocessStreamId(NO_STREAM),
         mFrameListHead(0),
-        mZslQueueHead(0),
-        mZslQueueTail(0) {
-    mZslQueue.insertAt(0, kZslBufferDepth);
-    mFrameList.insertAt(0, kFrameListDepth);
+        mHasFocuser(false) {
+    // Initialize buffer queue and frame list based on pipeline max depth.
+    size_t pipelineMaxDepth = kDefaultMaxPipelineDepth;
+    if (client != 0) {
+        sp<Camera3Device> device =
+                static_cast<Camera3Device*>(client->getCameraDevice().get());
+        if (device != 0) {
+            camera_metadata_ro_entry_t entry =
+                device->info().find(ANDROID_REQUEST_PIPELINE_MAX_DEPTH);
+            if (entry.count == 1) {
+                pipelineMaxDepth = entry.data.u8[0];
+            } else {
+                ALOGW("%s: Unable to find android.request.pipelineMaxDepth, "
+                        "using default pipeline max depth %d", __FUNCTION__,
+                        kDefaultMaxPipelineDepth);
+            }
+
+            entry = device->info().find(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
+            if (entry.count > 0 && entry.data.f[0] != 0.) {
+                mHasFocuser = true;
+            }
+        }
+    }
+
+    ALOGV("%s: Initializing buffer queue and frame list depths from max pipeline depth (%zu)",
+          __FUNCTION__, pipelineMaxDepth);
+    // Keep the buffer queue one entry longer than the metadata queue because a buffer can
+    // arrive earlier than its metadata, which would otherwise cause the buffer corresponding
+    // to the oldest metadata to be removed.
+    mFrameListDepth = pipelineMaxDepth;
+    mBufferQueueDepth = mFrameListDepth + 1;
+
+    mZslQueue.insertAt(0, mBufferQueueDepth);
+    mFrameList.insertAt(0, mFrameListDepth);
     sp<CaptureSequencer> captureSequencer = mSequencer.promote();
     if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
 }
 
 ZslProcessor::~ZslProcessor() {
     ALOGV("%s: Exit", __FUNCTION__);
-    disconnect();
-}
-
-void ZslProcessor::onFrameAvailable(const BufferItem& /*item*/) {
-    Mutex::Autolock l(mInputMutex);
-    if (!mZslBufferAvailable) {
-        mZslBufferAvailable = true;
-        mZslBufferAvailableSignal.signal();
-    }
+    deleteStream();
 }
 
 void ZslProcessor::onResultAvailable(const CaptureResult &result) {
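The constructor above derives both queue depths from the static metadata: the frame list holds pipelineMaxDepth entries and the buffer queue one more, because a buffer can arrive before its metadata. A self-contained sketch of that sizing and of the ring insertion used by onResultAvailable(), with std::string standing in for CameraMetadata and the depth hard-coded where the real code reads ANDROID_REQUEST_PIPELINE_MAX_DEPTH:

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
        const std::size_t pipelineMaxDepth = 4;                  // from static camera info
        const std::size_t frameListDepth   = pipelineMaxDepth;   // metadata ring
        const std::size_t bufferQueueDepth = frameListDepth + 1; // buffers may beat metadata

        std::vector<std::string> frameList(frameListDepth);
        std::size_t head = 0;

        // onResultAvailable-style insertion: overwrite the oldest slot in the ring.
        for (int frame = 0; frame < 6; frame++) {
            frameList[head] = "metadata for frame " + std::to_string(frame);
            head = (head + 1) % frameListDepth;
        }

        std::printf("bufferQueueDepth=%zu, oldest surviving entry: %s\n",
                    bufferQueueDepth, frameList[head].c_str());
        return 0;
    }
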
@@ -81,35 +102,27 @@
     camera_metadata_ro_entry_t entry;
     entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
-    nsecs_t timestamp = entry.data.i64[0];
-    (void)timestamp;
-    ALOGVV("Got preview frame for timestamp %" PRId64, timestamp);
+    if (entry.count == 0) {
+        ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
+        return;
+    }
+    // Read the timestamp only after verifying the entry is present.
+    nsecs_t timestamp = entry.data.i64[0];
+
+    entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
+    if (entry.count == 0) {
+        ALOGE("%s: metadata doesn't have frame number, skip this result", __FUNCTION__);
+        return;
+    }
+    int32_t frameNumber = entry.data.i32[0];
+
+    ALOGVV("Got preview metadata for frame %d with timestamp %" PRId64, frameNumber, timestamp);
 
     if (mState != RUNNING) return;
 
+    // Corresponding buffer has been cleared. No need to push into mFrameList
+    if (timestamp <= mLatestClearedBufferTimestamp) return;
+
     mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
-    mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
-
-    findMatchesLocked();
-}
-
-void ZslProcessor::onBufferReleased(buffer_handle_t *handle) {
-    Mutex::Autolock l(mInputMutex);
-
-    // Verify that the buffer is in our queue
-    size_t i = 0;
-    for (; i < mZslQueue.size(); i++) {
-        if (&(mZslQueue[i].buffer.mGraphicBuffer->handle) == handle) break;
-    }
-    if (i == mZslQueue.size()) {
-        ALOGW("%s: Released buffer %p not found in queue",
-                __FUNCTION__, handle);
-    }
-
-    // Erase entire ZSL queue since we've now completed the capture and preview
-    // is stopped.
-    clearZslQueueLocked();
-
-    mState = RUNNING;
+    mFrameListHead = (mFrameListHead + 1) % mFrameListDepth;
 }
 
 status_t ZslProcessor::updateStream(const Parameters &params) {
@@ -124,25 +137,13 @@
         ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
         return INVALID_OPERATION;
     }
-    sp<CameraDeviceBase> device = mDevice.promote();
+    sp<Camera3Device> device =
+        static_cast<Camera3Device*>(client->getCameraDevice().get());
     if (device == 0) {
         ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
         return INVALID_OPERATION;
     }
 
-    if (mZslConsumer == 0) {
-        // Create CPU buffer queue endpoint
-        sp<IGraphicBufferProducer> producer;
-        sp<IGraphicBufferConsumer> consumer;
-        BufferQueue::createBufferQueue(&producer, &consumer);
-        mZslConsumer = new BufferItemConsumer(consumer,
-            GRALLOC_USAGE_HW_CAMERA_ZSL,
-            kZslBufferDepth);
-        mZslConsumer->setFrameAvailableListener(this);
-        mZslConsumer->setName(String8("Camera2-ZslConsumer"));
-        mZslWindow = new Surface(producer);
-    }
-
     if (mZslStreamId != NO_STREAM) {
         // Check if stream parameters have to change
         uint32_t currentWidth, currentHeight;
@@ -151,57 +152,50 @@
         if (res != OK) {
             ALOGE("%s: Camera %d: Error querying capture output stream info: "
                     "%s (%d)", __FUNCTION__,
-                    mId, strerror(-res), res);
+                    client->getCameraId(), strerror(-res), res);
             return res;
         }
         if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
                 currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
-            res = device->deleteReprocessStream(mZslReprocessStreamId);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to delete old reprocess stream "
-                        "for ZSL: %s (%d)", __FUNCTION__,
-                        mId, strerror(-res), res);
-                return res;
-            }
-            ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed",
-                __FUNCTION__, mId, mZslStreamId);
+            ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
+                  "dimensions changed",
+                __FUNCTION__, client->getCameraId(), mZslStreamId);
             res = device->deleteStream(mZslStreamId);
-            if (res != OK) {
+            if (res == -EBUSY) {
+                ALOGV("%s: Camera %d: Device is busy, call updateStream again "
+                      "after it becomes idle", __FUNCTION__, mId);
+                return res;
+            } else if (res != OK) {
                 ALOGE("%s: Camera %d: Unable to delete old output stream "
                         "for ZSL: %s (%d)", __FUNCTION__,
-                        mId, strerror(-res), res);
+                        client->getCameraId(), strerror(-res), res);
                 return res;
             }
             mZslStreamId = NO_STREAM;
         }
     }
 
-    mDeleted = false;
-
     if (mZslStreamId == NO_STREAM) {
         // Create stream for HAL production
         // TODO: Sort out better way to select resolution for ZSL
-        int streamType = params.quirks.useZslFormat ?
-                (int)CAMERA2_HAL_PIXEL_FORMAT_ZSL :
-                (int)HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
-        res = device->createStream(mZslWindow,
-                params.fastInfo.arrayWidth, params.fastInfo.arrayHeight, streamType,
-                HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mZslStreamId);
+
+        // Note that the format is specified internally in Camera3ZslStream
+        res = device->createZslStream(
+                params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
+                mBufferQueueDepth,
+                &mZslStreamId,
+                &mZslStream);
         if (res != OK) {
-            ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
-                    "%s (%d)", __FUNCTION__, mId,
+            ALOGE("%s: Camera %d: Can't create ZSL stream: "
+                    "%s (%d)", __FUNCTION__, client->getCameraId(),
                     strerror(-res), res);
             return res;
         }
-        res = device->createReprocessStreamFromStream(mZslStreamId,
-                &mZslReprocessStreamId);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Can't create reprocess stream for ZSL: "
-                    "%s (%d)", __FUNCTION__, mId,
-                    strerror(-res), res);
-            return res;
-        }
+
+        // Only add the camera3 buffer listener when the stream is created.
+        mZslStream->addBufferListener(this);
     }
+
     client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
             Camera2Client::kPreviewRequestIdEnd,
             this,
@@ -212,47 +206,32 @@
 
 status_t ZslProcessor::deleteStream() {
     ATRACE_CALL();
-    Mutex::Autolock l(mInputMutex);
-    // WAR(b/15408128): do not delete stream unless client is being disconnected.
-    mDeleted = true;
-    return OK;
-}
-
-status_t ZslProcessor::disconnect() {
-    ATRACE_CALL();
     status_t res;
 
     Mutex::Autolock l(mInputMutex);
 
     if (mZslStreamId != NO_STREAM) {
-        sp<CameraDeviceBase> device = mDevice.promote();
+        sp<Camera2Client> client = mClient.promote();
+        if (client == 0) {
+            ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+            return INVALID_OPERATION;
+        }
+
+        sp<Camera3Device> device =
+            static_cast<Camera3Device*>(client->getCameraDevice().get());
         if (device == 0) {
             ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
             return INVALID_OPERATION;
         }
 
-        clearZslQueueLocked();
-
-        res = device->deleteReprocessStream(mZslReprocessStreamId);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Cannot delete ZSL reprocessing stream %d: "
-                    "%s (%d)", __FUNCTION__, mId,
-                    mZslReprocessStreamId, strerror(-res), res);
-            return res;
-        }
-
-        mZslReprocessStreamId = NO_STREAM;
         res = device->deleteStream(mZslStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: "
-                    "%s (%d)", __FUNCTION__, mId,
+                    "%s (%d)", __FUNCTION__, client->getCameraId(),
                     mZslStreamId, strerror(-res), res);
             return res;
         }
 
-        mZslWindow.clear();
-        mZslConsumer.clear();
-
         mZslStreamId = NO_STREAM;
     }
     return OK;
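updateStream() above now returns -EBUSY without treating it as fatal, telling the caller to retry once the device goes idle. A hedged sketch of a caller honoring that contract; Device, waitUntilIdle(), and the retry policy are illustrative stand-ins, not the real Camera3Device API:

    #include <cerrno>
    #include <cstdio>

    using status_t = int;
    constexpr status_t OK_STATUS = 0;

    // Toy device: reports busy once, then succeeds.
    struct Device {
        int busyCalls = 1;
        status_t deleteStream() { return busyCalls-- > 0 ? -EBUSY : OK_STATUS; }
        void waitUntilIdle() { /* the real device would block here */ }
    };

    status_t deleteStreamWithRetry(Device& dev, int maxAttempts) {
        status_t res = -EBUSY;
        for (int i = 0; i < maxAttempts && res == -EBUSY; i++) {
            res = dev.deleteStream();
            if (res == -EBUSY) {
                std::printf("device busy, waiting for idle before retrying\n");
                dev.waitUntilIdle();
            }
        }
        return res;
    }

    int main() {
        Device dev;
        std::printf("final status: %d\n", deleteStreamWithRetry(dev, 3));
        return 0;
    }
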
@@ -263,6 +242,46 @@
     return mZslStreamId;
 }
 
+status_t ZslProcessor::updateRequestWithDefaultStillRequest(CameraMetadata &request) const {
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) {
+        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+    sp<Camera3Device> device =
+        static_cast<Camera3Device*>(client->getCameraDevice().get());
+    if (device == 0) {
+        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    CameraMetadata stillTemplate;
+    device->createDefaultRequest(CAMERA3_TEMPLATE_STILL_CAPTURE, &stillTemplate);
+
+    // Find the post-processing tags, and assign the values from the template to the request.
+    // Check the modes that most affect image quality: noise reduction, aberration correction,
+    // color correction, tonemap, shading, hot pixel, and edge modes.
+    uint32_t postProcessingTags[] = {
+            ANDROID_NOISE_REDUCTION_MODE,
+            ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+            ANDROID_COLOR_CORRECTION_MODE,
+            ANDROID_TONEMAP_MODE,
+            ANDROID_SHADING_MODE,
+            ANDROID_HOT_PIXEL_MODE,
+            ANDROID_EDGE_MODE
+    };
+
+    camera_metadata_entry_t entry;
+    for (size_t i = 0; i < sizeof(postProcessingTags) / sizeof(uint32_t); i++) {
+        entry = stillTemplate.find(postProcessingTags[i]);
+        if (entry.count > 0) {
+            request.update(postProcessingTags[i], entry.data.u8, 1);
+        }
+    }
+
+    return OK;
+}
+
 status_t ZslProcessor::pushToReprocess(int32_t requestId) {
     ALOGV("%s: Send in reprocess request with id %d",
             __FUNCTION__, requestId);
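updateRequestWithDefaultStillRequest() above copies a fixed set of post-processing tags from the still-capture template into the reprocess request, skipping any tag the template omits. The same loop restated with toy types (a std::map in place of CameraMetadata, made-up tag values in place of the ANDROID_* constants; illustrative only):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <iterator>
    #include <map>

    using Metadata = std::map<std::uint32_t, std::uint8_t>;

    // Copy the listed post-processing tags from the template into the request,
    // skipping any tag the template does not define (mirrors the entry.count check).
    void copyPostProcessingTags(const Metadata& stillTemplate, Metadata& request) {
        static const std::uint32_t kPostProcessingTags[] = { 0x1001, 0x1002, 0x1003 };
        for (std::size_t i = 0; i < std::size(kPostProcessingTags); i++) {
            auto it = stillTemplate.find(kPostProcessingTags[i]);
            if (it != stillTemplate.end()) {
                request[it->first] = it->second;  // request.update(tag, &value, 1)
            }
        }
    }

    int main() {
        Metadata stillTemplate{{0x1001, 1}, {0x1003, 2}};
        Metadata request;
        copyPostProcessingTags(stillTemplate, request);
        std::printf("copied %zu post-processing tags\n", request.size());
        return 0;
    }
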
@@ -279,21 +298,30 @@
         dumpZslQueue(-1);
     }
 
-    if (mZslQueueTail != mZslQueueHead) {
-        CameraMetadata request;
-        size_t index = mZslQueueTail;
-        while (index != mZslQueueHead) {
-            if (!mZslQueue[index].frame.isEmpty()) {
-                request = mZslQueue[index].frame;
-                break;
-            }
-            index = (index + 1) % kZslBufferDepth;
-        }
-        if (index == mZslQueueHead) {
-            ALOGV("%s: ZSL queue has no valid frames to send yet.",
-                  __FUNCTION__);
-            return NOT_ENOUGH_DATA;
-        }
+    size_t metadataIdx;
+    nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx);
+
+    if (candidateTimestamp == -1) {
+        ALOGE("%s: Could not find a good candidate for ZSL reprocessing",
+              __FUNCTION__);
+        return NOT_ENOUGH_DATA;
+    }
+
+    res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp,
+                                                    /*actualTimestamp*/NULL);
+
+    if (res == mZslStream->NO_BUFFER_AVAILABLE) {
+        ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
+        return NOT_ENOUGH_DATA;
+    } else if (res != OK) {
+        ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    {
+        CameraMetadata request = mFrameList[metadataIdx];
+
         // Verify that the frame is reasonable for reprocessing
 
         camera_metadata_entry_t entry;
@@ -310,25 +338,51 @@
             return NOT_ENOUGH_DATA;
         }
 
-        buffer_handle_t *handle =
-            &(mZslQueue[index].buffer.mGraphicBuffer->handle);
-
         uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
         res = request.update(ANDROID_REQUEST_TYPE,
                 &requestType, 1);
+        if (res != OK) {
+            ALOGE("%s: Unable to update request type",
+                  __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+
         int32_t inputStreams[1] =
-                { mZslReprocessStreamId };
-        if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
+                { mZslStreamId };
+        res = request.update(ANDROID_REQUEST_INPUT_STREAMS,
                 inputStreams, 1);
+        if (res != OK) {
+            ALOGE("%s: Unable to update request input streams",
+                  __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+
+        uint8_t captureIntent =
+                static_cast<uint8_t>(ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE);
+        res = request.update(ANDROID_CONTROL_CAPTURE_INTENT,
+                &captureIntent, 1);
+        if (res != OK) {
+            ALOGE("%s: Unable to update request capture intent",
+                  __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+
+        // TODO: Shouldn't we also update the latest preview frame?
         int32_t outputStreams[1] =
                 { client->getCaptureStreamId() };
-        if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+        res = request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
                 outputStreams, 1);
+        if (res != OK) {
+            ALOGE("%s: Unable to update request output streams",
+                  __FUNCTION__);
+            return INVALID_OPERATION;
+        }
+
         res = request.update(ANDROID_REQUEST_ID,
                 &requestId, 1);
-
         if (res != OK ) {
-            ALOGE("%s: Unable to update frame to a reprocess request", __FUNCTION__);
+            ALOGE("%s: Unable to update frame to a reprocess request",
+                  __FUNCTION__);
             return INVALID_OPERATION;
         }
 
@@ -336,17 +390,9 @@
         if (res != OK) {
             ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
                 "%s (%d)",
-                __FUNCTION__, mId, strerror(-res), res);
+                __FUNCTION__, client->getCameraId(), strerror(-res), res);
             return INVALID_OPERATION;
         }
-        // TODO: have push-and-clear be atomic
-        res = client->getCameraDevice()->pushReprocessBuffer(mZslReprocessStreamId,
-                handle, this);
-        if (res != OK) {
-            ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
-                    __FUNCTION__, strerror(-res), res);
-            return res;
-        }
 
         // Update JPEG settings
         {
@@ -355,25 +401,30 @@
             if (res != OK) {
                 ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
                         "capture request: %s (%d)", __FUNCTION__,
-                        mId,
+                        client->getCameraId(),
                         strerror(-res), res);
                 return res;
             }
         }
 
+        // Update post-processing settings
+        res = updateRequestWithDefaultStillRequest(request);
+        if (res != OK) {
+            ALOGW("%s: Unable to update post-processing tags, the reprocessed image quality "
+                    "may be compromised", __FUNCTION__);
+        }
+
         mLatestCapturedRequest = request;
         res = client->getCameraDevice()->capture(request);
         if (res != OK ) {
-            ALOGE("%s: Unable to send ZSL reprocess request to capture: %s (%d)",
-                    __FUNCTION__, strerror(-res), res);
+            ALOGE("%s: Unable to send ZSL reprocess request to capture: %s"
+                  " (%d)", __FUNCTION__, strerror(-res), res);
             return res;
         }
 
         mState = LOCKED;
-    } else {
-        ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
-        return NOT_ENOUGH_DATA;
     }
+
     return OK;
 }
 
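A recurring pattern in the reworked pushToReprocess() is the fail-fast metadata update: every request.update() call is checked and the request build aborts on the first failure, replacing the old chained "if (res == OK) request.update(...)" style. A minimal sketch of that pattern with toy Status and step types (illustrative only):

    #include <cstdio>
    #include <functional>
    #include <vector>

    enum Status { STATUS_OK = 0, STATUS_INVALID_OPERATION = -1 };

    // Run each update step in order and stop at the first failure, mirroring the
    // repeated "res = request.update(...); if (res != OK) return" blocks above.
    Status applyAll(const std::vector<std::function<Status()>>& steps) {
        for (const auto& step : steps) {
            if (step() != STATUS_OK) {
                std::fprintf(stderr, "update step failed, aborting request build\n");
                return STATUS_INVALID_OPERATION;
            }
        }
        return STATUS_OK;
    }

    int main() {
        int updates = 0;
        Status res = applyAll({
            [&updates] { updates++; return STATUS_OK; },  // e.g. ANDROID_REQUEST_TYPE
            [&updates] { updates++; return STATUS_OK; },  // e.g. ANDROID_REQUEST_INPUT_STREAMS
        });
        std::printf("applied %d updates, status %d\n", updates, res);
        return 0;
    }
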
@@ -386,17 +437,20 @@
 }
 
 status_t ZslProcessor::clearZslQueueLocked() {
-    for (size_t i = 0; i < mZslQueue.size(); i++) {
-        if (mZslQueue[i].buffer.mTimestamp != 0) {
-            mZslConsumer->releaseBuffer(mZslQueue[i].buffer);
-        }
-        mZslQueue.replaceAt(i);
+    if (mZslStream != 0) {
+        // Clear the result metadata list first.
+        clearZslResultQueueLocked();
+        return mZslStream->clearInputRingBuffer(&mLatestClearedBufferTimestamp);
     }
-    mZslQueueHead = 0;
-    mZslQueueTail = 0;
     return OK;
 }
 
+void ZslProcessor::clearZslResultQueueLocked() {
+    mFrameList.clear();
+    mFrameListHead = 0;
+    mFrameList.insertAt(0, mFrameListDepth);
+}
+
 void ZslProcessor::dump(int fd, const Vector<String16>& /*args*/) const {
     Mutex::Autolock l(mInputMutex);
     if (!mLatestCapturedRequest.isEmpty()) {
@@ -411,128 +465,9 @@
 }
 
 bool ZslProcessor::threadLoop() {
-    status_t res;
-
-    {
-        Mutex::Autolock l(mInputMutex);
-        while (!mZslBufferAvailable) {
-            res = mZslBufferAvailableSignal.waitRelative(mInputMutex,
-                    kWaitDuration);
-            if (res == TIMED_OUT) return true;
-        }
-        mZslBufferAvailable = false;
-    }
-
-    do {
-        res = processNewZslBuffer();
-    } while (res == OK);
-
-    return true;
-}
-
-status_t ZslProcessor::processNewZslBuffer() {
-    ATRACE_CALL();
-    status_t res;
-    sp<BufferItemConsumer> zslConsumer;
-    {
-        Mutex::Autolock l(mInputMutex);
-        if (mZslConsumer == 0) return OK;
-        zslConsumer = mZslConsumer;
-    }
-    ALOGVV("Trying to get next buffer");
-    BufferItem item;
-    res = zslConsumer->acquireBuffer(&item, 0);
-    if (res != OK) {
-        if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
-            ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
-                    "%s (%d)", __FUNCTION__,
-                    mId, strerror(-res), res);
-        } else {
-            ALOGVV("  No buffer");
-        }
-        return res;
-    }
-
-    Mutex::Autolock l(mInputMutex);
-
-    if (mState == LOCKED) {
-        ALOGVV("In capture, discarding new ZSL buffers");
-        zslConsumer->releaseBuffer(item);
-        return OK;
-    }
-
-    ALOGVV("Got ZSL buffer: head: %d, tail: %d", mZslQueueHead, mZslQueueTail);
-
-    if ( (mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) {
-        ALOGVV("Releasing oldest buffer");
-        zslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
-        mZslQueue.replaceAt(mZslQueueTail);
-        mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth;
-    }
-
-    ZslPair &queueHead = mZslQueue.editItemAt(mZslQueueHead);
-
-    queueHead.buffer = item;
-    queueHead.frame.release();
-
-    mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth;
-
-    ALOGVV("  Acquired buffer, timestamp %" PRId64, queueHead.buffer.mTimestamp);
-
-    findMatchesLocked();
-
-    return OK;
-}
-
-void ZslProcessor::findMatchesLocked() {
-    ALOGVV("Scanning");
-    for (size_t i = 0; i < mZslQueue.size(); i++) {
-        ZslPair &queueEntry = mZslQueue.editItemAt(i);
-        nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
-        IF_ALOGV() {
-            camera_metadata_entry_t entry;
-            nsecs_t frameTimestamp = 0;
-            if (!queueEntry.frame.isEmpty()) {
-                entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
-                frameTimestamp = entry.data.i64[0];
-            }
-            ALOGVV("   %d: b: %" PRId64 "\tf: %" PRId64, i,
-                    bufferTimestamp, frameTimestamp );
-        }
-        if (queueEntry.frame.isEmpty() && bufferTimestamp != 0) {
-            // Have buffer, no matching frame. Look for one
-            for (size_t j = 0; j < mFrameList.size(); j++) {
-                bool match = false;
-                CameraMetadata &frame = mFrameList.editItemAt(j);
-                if (!frame.isEmpty()) {
-                    camera_metadata_entry_t entry;
-                    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
-                    if (entry.count == 0) {
-                        ALOGE("%s: Can't find timestamp in frame!",
-                                __FUNCTION__);
-                        continue;
-                    }
-                    nsecs_t frameTimestamp = entry.data.i64[0];
-                    if (bufferTimestamp == frameTimestamp) {
-                        ALOGVV("%s: Found match %" PRId64, __FUNCTION__,
-                                frameTimestamp);
-                        match = true;
-                    } else {
-                        int64_t delta = abs(bufferTimestamp - frameTimestamp);
-                        if ( delta < 1000000) {
-                            ALOGVV("%s: Found close match %" PRId64 " (delta %" PRId64 ")",
-                                    __FUNCTION__, bufferTimestamp, delta);
-                            match = true;
-                        }
-                    }
-                }
-                if (match) {
-                    queueEntry.frame.acquire(frame);
-                    break;
-                }
-            }
-        }
-    }
+    // TODO: remove dependency on thread. For now, shut thread down right
+    // away.
+    return false;
 }
 
 void ZslProcessor::dumpZslQueue(int fd) const {
@@ -567,5 +502,174 @@
     }
 }
 
+bool ZslProcessor::isFixedFocusMode(uint8_t afMode) const {
+    switch (afMode) {
+        case ANDROID_CONTROL_AF_MODE_AUTO:
+        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+        case ANDROID_CONTROL_AF_MODE_MACRO:
+            return false;
+        case ANDROID_CONTROL_AF_MODE_OFF:
+        case ANDROID_CONTROL_AF_MODE_EDOF:
+            return true;
+        default:
+            ALOGE("%s: unknown focus mode %d", __FUNCTION__, afMode);
+            return false;
+    }
+}
+
+nsecs_t ZslProcessor::getCandidateTimestampLocked(size_t* metadataIdx) const {
+    /**
+     * Find the smallest timestamp we know about so far
+     * - ensure that aeState is either converged or locked
+     */
+
+    size_t idx = 0;
+    nsecs_t minTimestamp = -1;
+
+    size_t emptyCount = mFrameList.size();
+
+    for (size_t j = 0; j < mFrameList.size(); j++) {
+        const CameraMetadata &frame = mFrameList[j];
+        if (!frame.isEmpty()) {
+
+            emptyCount--;
+
+            camera_metadata_ro_entry_t entry;
+            entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+            if (entry.count == 0) {
+                ALOGE("%s: Can't find timestamp in frame!",
+                        __FUNCTION__);
+                continue;
+            }
+            nsecs_t frameTimestamp = entry.data.i64[0];
+            if (minTimestamp > frameTimestamp || minTimestamp == -1) {
+
+                entry = frame.find(ANDROID_CONTROL_AE_STATE);
+
+                if (entry.count == 0) {
+                    /**
+                     * This is most likely a HAL bug. The aeState field is
+                     * mandatory, so it should always be in a metadata packet.
+                     */
+                    ALOGW("%s: ZSL queue frame has no AE state field!",
+                            __FUNCTION__);
+                    continue;
+                }
+                if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
+                        entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
+                    ALOGVV("%s: ZSL queue frame AE state is %d, need "
+                           "full capture",  __FUNCTION__, entry.data.u8[0]);
+                    continue;
+                }
+
+                entry = frame.find(ANDROID_CONTROL_AF_MODE);
+                if (entry.count == 0) {
+                    ALOGW("%s: ZSL queue frame has no AF mode field!",
+                            __FUNCTION__);
+                    continue;
+                }
+                uint8_t afMode = entry.data.u8[0];
+                if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
+                    // Skip all the ZSL buffer for manual AF mode, as we don't really
+                    // know the af state.
+                    continue;
+                }
+
+                // Check AF state if device has focuser and focus mode isn't fixed
+                if (mHasFocuser && !isFixedFocusMode(afMode)) {
+                    // Make sure the candidate frame has good focus.
+                    entry = frame.find(ANDROID_CONTROL_AF_STATE);
+                    if (entry.count == 0) {
+                        ALOGW("%s: ZSL queue frame has no AF state field!",
+                                __FUNCTION__);
+                        continue;
+                    }
+                    uint8_t afState = entry.data.u8[0];
+                    if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
+                            afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
+                            afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
+                        ALOGVV("%s: ZSL queue frame AF state %d is not good for capture, skip it",
+                                __FUNCTION__, afState);
+                        continue;
+                    }
+                }
+
+                minTimestamp = frameTimestamp;
+                idx = j;
+            }
+
+            ALOGVV("%s: Saw timestamp %" PRId64, __FUNCTION__, frameTimestamp);
+        }
+    }
+
+    if (emptyCount == mFrameList.size()) {
+        /**
+         * This could be mildly bad and means our ZSL was triggered before
+         * the camera framework received any frames.
+         *
+         * This is a fairly rare corner case which can happen when:
+         * + a user presses the shutter button very quickly after the camera
+         *     starts (startPreview followed immediately by takePicture).
+         * + capturing in a burst (hitting the shutter button as fast as
+         *     possible).
+         *
+         * If this happens in the steady state (preview running for a while,
+         *     then a single takePicture call), it might be a framework bug.
+         */
+        ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__);
+    }
+
+    ALOGV("%s: Candidate timestamp %" PRId64 " (idx %zu), empty frames: %zu",
+          __FUNCTION__, minTimestamp, idx, emptyCount);
+
+    if (metadataIdx) {
+        *metadataIdx = idx;
+    }
+
+    return minTimestamp;
+}
+
+void ZslProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
+    // Intentionally left empty.
+    // We could theoretically use this callback to gather better dump info.
+}
+
+void ZslProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
+
+    // ignore output buffers
+    if (bufferInfo.mOutput) {
+        return;
+    }
+
+    // Lock the mutex only once we know an input buffer was returned, to
+    // avoid a potential deadlock
+    Mutex::Autolock l(mInputMutex);
+    // TODO: Verify that the buffer is in our queue by looking at its timestamp;
+    // theoretically unnecessary unless we change the following assumption:
+    // -- only 1 buffer is reprocessed at a time (which is the case now)
+
+    // Erase entire ZSL queue since we've now completed the capture and preview
+    // is stopped.
+    //
+    // We need to guarantee that if we do two back-to-back captures,
+    // the second won't use a buffer that's older than or the same as the
+    // first, which is theoretically possible if we don't clear out the queue
+    // and the selection criterion is something like 'newest'. Clearing out
+    // the result metadata queue on a completed capture ensures we'll only
+    // use new timestamps.
+    // Calling clearZslQueueLocked here would be a guaranteed deadlock: this
+    // callback holds the Camera3Stream internal lock (mLock), and
+    // clearZslQueueLocked needs to hold the same lock.
+    // TODO: figure out a way to clear the ZSL buffer queue properly. For now
+    // it is safe not to do so, as back-to-back ZSL captures require stopping
+    // and restarting the preview, which flushes the ZSL queue automatically.
+    ALOGV("%s: Memory optimization, clearing ZSL queue",
+          __FUNCTION__);
+    clearZslResultQueueLocked();
+
+    // Required so we accept more ZSL requests
+    mState = RUNNING;
+}
+
 }; // namespace camera2
 }; // namespace android
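getCandidateTimestampLocked() above scans the metadata ring for the oldest frame whose 3A state allows reuse: AE must be converged or locked, and when the device has a focuser the AF state is screened as well. A self-contained sketch of the core selection rule, with the AF checks omitted for brevity and a simplified Frame struct standing in for the real metadata tags:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum AeState { AE_SEARCHING, AE_CONVERGED, AE_LOCKED };

    struct Frame {
        std::int64_t timestamp;  // ANDROID_SENSOR_TIMESTAMP
        AeState aeState;         // ANDROID_CONTROL_AE_STATE
    };

    // Returns the index of the oldest usable frame, or -1 when none qualifies.
    int findCandidate(const std::vector<Frame>& frames) {
        int best = -1;
        std::int64_t minTimestamp = -1;
        for (std::size_t i = 0; i < frames.size(); i++) {
            const Frame& f = frames[i];
            if (f.aeState != AE_CONVERGED && f.aeState != AE_LOCKED) {
                continue;  // exposure still settling: this frame needs a full capture
            }
            if (minTimestamp == -1 || f.timestamp < minTimestamp) {
                minTimestamp = f.timestamp;
                best = static_cast<int>(i);
            }
        }
        return best;
    }

    int main() {
        std::vector<Frame> frames = {
            {100, AE_SEARCHING}, {200, AE_CONVERGED}, {150, AE_LOCKED},
        };
        std::printf("candidate index: %d\n", findCandidate(frames));  // prints 2
        return 0;
    }
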
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index 5870bd3..86c06c6 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -25,11 +25,9 @@
 #include <gui/BufferItem.h>
 #include <gui/BufferItemConsumer.h>
 #include <camera/CameraMetadata.h>
-#include <camera/CaptureResult.h>
 
-#include "common/CameraDeviceBase.h"
-#include "api1/client2/ZslProcessorInterface.h"
 #include "api1/client2/FrameProcessor.h"
+#include "device3/Camera3ZslStream.h"
 
 namespace android {
 
@@ -38,45 +36,66 @@
 namespace camera2 {
 
 class CaptureSequencer;
-class Parameters;
+struct Parameters;
 
 /***
- * ZSL queue processing
+ * ZSL queue processing for HALv3.0 or newer
  */
-class ZslProcessor:
+class ZslProcessor :
+                    public camera3::Camera3StreamBufferListener,
             virtual public Thread,
-            virtual public BufferItemConsumer::FrameAvailableListener,
-            virtual public FrameProcessor::FilteredListener,
-            virtual public CameraDeviceBase::BufferReleasedListener,
-                    public ZslProcessorInterface {
+            virtual public FrameProcessor::FilteredListener {
   public:
     ZslProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
     ~ZslProcessor();
 
-    // From mZslConsumer
-    virtual void onFrameAvailable(const BufferItem& item);
-    // From FrameProcessor
+    // From FrameProcessor::FilteredListener
     virtual void onResultAvailable(const CaptureResult &result);
 
-    virtual void onBufferReleased(buffer_handle_t *handle);
-
-    /**
-     ****************************************
-     * ZslProcessorInterface implementation *
-     ****************************************
-     */
+    /**
+     **************************************************
+     * ZSL processing interface (formerly provided by *
+     * ZslProcessorInterface, now folded in here)     *
+     **************************************************
+     */
 
+    // Update the streams by recreating them if the size/format has changed
     status_t updateStream(const Parameters &params);
+
+    // Delete the underlying CameraDevice streams
     status_t deleteStream();
-    status_t disconnect();
+
+    // Get ID for use with android.request.outputStreams / inputStreams
     int getStreamId() const;
 
+    /**
+     * Submits a ZSL capture request (id = requestId)
+     *
+     * An appropriate ZSL buffer is selected by the closest timestamp,
+     * then we push that buffer to be reprocessed by the HAL.
+     * A capture request is created and submitted on behalf of the client.
+     */
     status_t pushToReprocess(int32_t requestId);
+
+    // Flush the ZSL buffer queue, freeing up all the buffers
     status_t clearZslQueue();
 
     void dump(int fd, const Vector<String16>& args) const;
+
+  protected:
+    /**
+     **********************************************
+     * Camera3StreamBufferListener implementation *
+     **********************************************
+     */
+    typedef camera3::Camera3StreamBufferListener::BufferInfo BufferInfo;
+    // Buffer was acquired by the HAL
+    virtual void onBufferAcquired(const BufferInfo& bufferInfo);
+    // Buffer was released by the HAL
+    virtual void onBufferReleased(const BufferInfo& bufferInfo);
+
   private:
     static const nsecs_t kWaitDuration = 10000000; // 10 ms
+    nsecs_t mLatestClearedBufferTimestamp;
 
     enum {
         RUNNING,
@@ -84,53 +103,52 @@
     } mState;
 
     wp<Camera2Client> mClient;
-    wp<CameraDeviceBase> mDevice;
     wp<CaptureSequencer> mSequencer;
-    int mId;
 
-    bool mDeleted;
+    const int mId;
 
     mutable Mutex mInputMutex;
-    bool mZslBufferAvailable;
-    Condition mZslBufferAvailableSignal;
 
     enum {
         NO_STREAM = -1
     };
 
     int mZslStreamId;
-    int mZslReprocessStreamId;
-    sp<BufferItemConsumer> mZslConsumer;
-    sp<Surface>            mZslWindow;
+    sp<camera3::Camera3ZslStream> mZslStream;
 
     struct ZslPair {
         BufferItem buffer;
         CameraMetadata frame;
     };
 
-    static const size_t kZslBufferDepth = 4;
-    static const size_t kFrameListDepth = kZslBufferDepth * 2;
+    static const int32_t kDefaultMaxPipelineDepth = 4;
+    size_t mBufferQueueDepth;
+    size_t mFrameListDepth;
     Vector<CameraMetadata> mFrameList;
     size_t mFrameListHead;
 
     ZslPair mNextPair;
 
     Vector<ZslPair> mZslQueue;
-    size_t mZslQueueHead;
-    size_t mZslQueueTail;
 
     CameraMetadata mLatestCapturedRequest;
 
+    bool mHasFocuser;
+
     virtual bool threadLoop();
 
-    status_t processNewZslBuffer();
-
-    // Match up entries from frame list to buffers in ZSL queue
-    void findMatchesLocked();
-
     status_t clearZslQueueLocked();
 
+    void clearZslResultQueueLocked();
+
     void dumpZslQueue(int id) const;
+
+    nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
+
+    bool isFixedFocusMode(uint8_t afMode) const;
+
+    // Update the post-processing metadata with the default still capture request template
+    status_t updateRequestWithDefaultStillRequest(CameraMetadata &request) const;
 };
 
 
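The reworked header registers ZslProcessor as a Camera3StreamBufferListener, and onBufferReleased() ignores output buffers before taking any lock, per the deadlock note in the .cpp. A skeletal illustration of that listener shape; BufferInfo and the base class here are simplified stand-ins for the camera3 types:

    #include <cstdio>
    #include <mutex>

    struct BufferInfo {
        bool mOutput;  // true for output buffers, false for reprocess inputs
    };

    class BufferListener {
      public:
        virtual ~BufferListener() = default;
        virtual void onBufferAcquired(const BufferInfo& info) = 0;
        virtual void onBufferReleased(const BufferInfo& info) = 0;
    };

    class ZslListenerSketch : public BufferListener {
      public:
        void onBufferAcquired(const BufferInfo&) override {
            // Intentionally empty, as in ZslProcessor.
        }
        void onBufferReleased(const BufferInfo& info) override {
            if (info.mOutput) {
                return;  // only reprocess-input returns matter here
            }
            // Take the processor lock only after the early-out, mirroring the
            // deadlock-avoidance note in onBufferReleased() above.
            std::lock_guard<std::mutex> lock(mInputMutex);
            mCapturesInFlight--;
            std::printf("input buffer returned, captures in flight: %d\n",
                        mCapturesInFlight);
        }

      private:
        std::mutex mInputMutex;
        int mCapturesInFlight = 1;
    };

    int main() {
        ZslListenerSketch listener;
        listener.onBufferReleased(BufferInfo{/*mOutput=*/false});
        return 0;
    }
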
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
deleted file mode 100644
index 69620ac..0000000
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Camera2-ZslProcessor3"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-//#define LOG_NNDEBUG 0
-
-#ifdef LOG_NNDEBUG
-#define ALOGVV(...) ALOGV(__VA_ARGS__)
-#else
-#define ALOGVV(...) ((void)0)
-#endif
-
-#include <inttypes.h>
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-#include <gui/Surface.h>
-
-#include "common/CameraDeviceBase.h"
-#include "api1/Camera2Client.h"
-#include "api1/client2/CaptureSequencer.h"
-#include "api1/client2/ZslProcessor3.h"
-#include "device3/Camera3Device.h"
-
-namespace android {
-namespace camera2 {
-
-ZslProcessor3::ZslProcessor3(
-    sp<Camera2Client> client,
-    wp<CaptureSequencer> sequencer):
-        Thread(false),
-        mLatestClearedBufferTimestamp(0),
-        mState(RUNNING),
-        mClient(client),
-        mSequencer(sequencer),
-        mId(client->getCameraId()),
-        mZslStreamId(NO_STREAM),
-        mFrameListHead(0),
-        mZslQueueHead(0),
-        mZslQueueTail(0),
-        mHasFocuser(false) {
-    // Initialize buffer queue and frame list based on pipeline max depth.
-    size_t pipelineMaxDepth = kDefaultMaxPipelineDepth;
-    if (client != 0) {
-        sp<Camera3Device> device =
-        static_cast<Camera3Device*>(client->getCameraDevice().get());
-        if (device != 0) {
-            camera_metadata_ro_entry_t entry =
-                device->info().find(ANDROID_REQUEST_PIPELINE_MAX_DEPTH);
-            if (entry.count == 1) {
-                pipelineMaxDepth = entry.data.u8[0];
-            } else {
-                ALOGW("%s: Unable to find the android.request.pipelineMaxDepth,"
-                        " use default pipeline max depth %zu", __FUNCTION__,
-                        kDefaultMaxPipelineDepth);
-            }
-
-            entry = device->info().find(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
-            if (entry.count > 0 && entry.data.f[0] != 0.) {
-                mHasFocuser = true;
-            }
-        }
-    }
-
-    ALOGV("%s: Initialize buffer queue and frame list depth based on max pipeline depth (%d)",
-          __FUNCTION__, pipelineMaxDepth);
-    // Need to keep buffer queue longer than metadata queue because sometimes buffer arrives
-    // earlier than metadata which causes the buffer corresponding to oldest metadata being
-    // removed.
-    mFrameListDepth = pipelineMaxDepth;
-    mBufferQueueDepth = mFrameListDepth + 1;
-
-
-    mZslQueue.insertAt(0, mBufferQueueDepth);
-    mFrameList.insertAt(0, mFrameListDepth);
-    sp<CaptureSequencer> captureSequencer = mSequencer.promote();
-    if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
-}
-
-ZslProcessor3::~ZslProcessor3() {
-    ALOGV("%s: Exit", __FUNCTION__);
-    deleteStream();
-}
-
-void ZslProcessor3::onResultAvailable(const CaptureResult &result) {
-    ATRACE_CALL();
-    ALOGV("%s:", __FUNCTION__);
-    Mutex::Autolock l(mInputMutex);
-    camera_metadata_ro_entry_t entry;
-    entry = result.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
-    nsecs_t timestamp = entry.data.i64[0];
-    if (entry.count == 0) {
-        ALOGE("%s: metadata doesn't have timestamp, skip this result", __FUNCTION__);
-        return;
-    }
-
-    entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
-    if (entry.count == 0) {
-        ALOGE("%s: metadata doesn't have frame number, skip this result", __FUNCTION__);
-        return;
-    }
-    int32_t frameNumber = entry.data.i32[0];
-
-    ALOGVV("Got preview metadata for frame %d with timestamp %" PRId64, frameNumber, timestamp);
-
-    if (mState != RUNNING) return;
-
-    // Corresponding buffer has been cleared. No need to push into mFrameList
-    if (timestamp <= mLatestClearedBufferTimestamp) return;
-
-    mFrameList.editItemAt(mFrameListHead) = result.mMetadata;
-    mFrameListHead = (mFrameListHead + 1) % mFrameListDepth;
-}
-
-status_t ZslProcessor3::updateStream(const Parameters &params) {
-    ATRACE_CALL();
-    ALOGV("%s: Configuring ZSL streams", __FUNCTION__);
-    status_t res;
-
-    Mutex::Autolock l(mInputMutex);
-
-    sp<Camera2Client> client = mClient.promote();
-    if (client == 0) {
-        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
-        return INVALID_OPERATION;
-    }
-    sp<Camera3Device> device =
-        static_cast<Camera3Device*>(client->getCameraDevice().get());
-    if (device == 0) {
-        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
-        return INVALID_OPERATION;
-    }
-
-    if (mZslStreamId != NO_STREAM) {
-        // Check if stream parameters have to change
-        uint32_t currentWidth, currentHeight;
-        res = device->getStreamInfo(mZslStreamId,
-                &currentWidth, &currentHeight, 0, 0);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Error querying capture output stream info: "
-                    "%s (%d)", __FUNCTION__,
-                    client->getCameraId(), strerror(-res), res);
-            return res;
-        }
-        if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
-                currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
-            ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
-                  "dimensions changed",
-                __FUNCTION__, client->getCameraId(), mZslStreamId);
-            res = device->deleteStream(mZslStreamId);
-            if (res == -EBUSY) {
-                ALOGV("%s: Camera %d: Device is busy, call updateStream again "
-                      " after it becomes idle", __FUNCTION__, mId);
-                return res;
-            } else if(res != OK) {
-                ALOGE("%s: Camera %d: Unable to delete old output stream "
-                        "for ZSL: %s (%d)", __FUNCTION__,
-                        client->getCameraId(), strerror(-res), res);
-                return res;
-            }
-            mZslStreamId = NO_STREAM;
-        }
-    }
-
-    if (mZslStreamId == NO_STREAM) {
-        // Create stream for HAL production
-        // TODO: Sort out better way to select resolution for ZSL
-
-        // Note that format specified internally in Camera3ZslStream
-        res = device->createZslStream(
-                params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
-                mBufferQueueDepth,
-                &mZslStreamId,
-                &mZslStream);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Can't create ZSL stream: "
-                    "%s (%d)", __FUNCTION__, client->getCameraId(),
-                    strerror(-res), res);
-            return res;
-        }
-
-        // Only add the camera3 buffer listener when the stream is created.
-        mZslStream->addBufferListener(this);
-    }
-
-    client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
-            Camera2Client::kPreviewRequestIdEnd,
-            this,
-            /*sendPartials*/false);
-
-    return OK;
-}
-
-status_t ZslProcessor3::deleteStream() {
-    ATRACE_CALL();
-    status_t res;
-
-    Mutex::Autolock l(mInputMutex);
-
-    if (mZslStreamId != NO_STREAM) {
-        sp<Camera2Client> client = mClient.promote();
-        if (client == 0) {
-            ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
-            return INVALID_OPERATION;
-        }
-
-        sp<Camera3Device> device =
-            reinterpret_cast<Camera3Device*>(client->getCameraDevice().get());
-        if (device == 0) {
-            ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
-            return INVALID_OPERATION;
-        }
-
-        res = device->deleteStream(mZslStreamId);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: "
-                    "%s (%d)", __FUNCTION__, client->getCameraId(),
-                    mZslStreamId, strerror(-res), res);
-            return res;
-        }
-
-        mZslStreamId = NO_STREAM;
-    }
-    return OK;
-}
-
-int ZslProcessor3::getStreamId() const {
-    Mutex::Autolock l(mInputMutex);
-    return mZslStreamId;
-}
-
-status_t ZslProcessor3::updateRequestWithDefaultStillRequest(CameraMetadata &request) const {
-    sp<Camera2Client> client = mClient.promote();
-    if (client == 0) {
-        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
-        return INVALID_OPERATION;
-    }
-    sp<Camera3Device> device =
-        static_cast<Camera3Device*>(client->getCameraDevice().get());
-    if (device == 0) {
-        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
-        return INVALID_OPERATION;
-    }
-
-    CameraMetadata stillTemplate;
-    device->createDefaultRequest(CAMERA3_TEMPLATE_STILL_CAPTURE, &stillTemplate);
-
-    // Find some of the post-processing tags, and assign the value from template to the request.
-    // Only check the aberration mode and noise reduction mode for now, as they are very important
-    // for image quality.
-    uint32_t postProcessingTags[] = {
-            ANDROID_NOISE_REDUCTION_MODE,
-            ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
-            ANDROID_COLOR_CORRECTION_MODE,
-            ANDROID_TONEMAP_MODE,
-            ANDROID_SHADING_MODE,
-            ANDROID_HOT_PIXEL_MODE,
-            ANDROID_EDGE_MODE
-    };
-
-    camera_metadata_entry_t entry;
-    for (size_t i = 0; i < sizeof(postProcessingTags) / sizeof(uint32_t); i++) {
-        entry = stillTemplate.find(postProcessingTags[i]);
-        if (entry.count > 0) {
-            request.update(postProcessingTags[i], entry.data.u8, 1);
-        }
-    }
-
-    return OK;
-}
-
-status_t ZslProcessor3::pushToReprocess(int32_t requestId) {
-    ALOGV("%s: Send in reprocess request with id %d",
-            __FUNCTION__, requestId);
-    Mutex::Autolock l(mInputMutex);
-    status_t res;
-    sp<Camera2Client> client = mClient.promote();
-
-    if (client == 0) {
-        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
-        return INVALID_OPERATION;
-    }
-
-    IF_ALOGV() {
-        dumpZslQueue(-1);
-    }
-
-    size_t metadataIdx;
-    nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx);
-
-    if (candidateTimestamp == -1) {
-        ALOGE("%s: Could not find good candidate for ZSL reprocessing",
-              __FUNCTION__);
-        return NOT_ENOUGH_DATA;
-    }
-
-    res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp,
-                                                    /*actualTimestamp*/NULL);
-
-    if (res == mZslStream->NO_BUFFER_AVAILABLE) {
-        ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
-        return NOT_ENOUGH_DATA;
-    } else if (res != OK) {
-        ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return res;
-    }
-
-    {
-        CameraMetadata request = mFrameList[metadataIdx];
-
-        // Verify that the frame is reasonable for reprocessing
-
-        camera_metadata_entry_t entry;
-        entry = request.find(ANDROID_CONTROL_AE_STATE);
-        if (entry.count == 0) {
-            ALOGE("%s: ZSL queue frame has no AE state field!",
-                    __FUNCTION__);
-            return BAD_VALUE;
-        }
-        if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
-                entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
-            ALOGV("%s: ZSL queue frame AE state is %d, need full capture",
-                    __FUNCTION__, entry.data.u8[0]);
-            return NOT_ENOUGH_DATA;
-        }
-
-        uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
-        res = request.update(ANDROID_REQUEST_TYPE,
-                &requestType, 1);
-        if (res != OK) {
-            ALOGE("%s: Unable to update request type",
-                  __FUNCTION__);
-            return INVALID_OPERATION;
-        }
-
-        int32_t inputStreams[1] =
-                { mZslStreamId };
-        res = request.update(ANDROID_REQUEST_INPUT_STREAMS,
-                inputStreams, 1);
-        if (res != OK) {
-            ALOGE("%s: Unable to update request input streams",
-                  __FUNCTION__);
-            return INVALID_OPERATION;
-        }
-
-        uint8_t captureIntent =
-                static_cast<uint8_t>(ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE);
-        res = request.update(ANDROID_CONTROL_CAPTURE_INTENT,
-                &captureIntent, 1);
-        if (res != OK ) {
-            ALOGE("%s: Unable to update request capture intent",
-                  __FUNCTION__);
-            return INVALID_OPERATION;
-        }
-
-        // TODO: Shouldn't we also update the latest preview frame?
-        int32_t outputStreams[1] =
-                { client->getCaptureStreamId() };
-        res = request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
-                outputStreams, 1);
-        if (res != OK) {
-            ALOGE("%s: Unable to update request output streams",
-                  __FUNCTION__);
-            return INVALID_OPERATION;
-        }
-
-        res = request.update(ANDROID_REQUEST_ID,
-                &requestId, 1);
-        if (res != OK ) {
-            ALOGE("%s: Unable to update frame to a reprocess request",
-                  __FUNCTION__);
-            return INVALID_OPERATION;
-        }
-
-        res = client->stopStream();
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
-                "%s (%d)",
-                __FUNCTION__, client->getCameraId(), strerror(-res), res);
-            return INVALID_OPERATION;
-        }
-
-        // Update JPEG settings
-        {
-            SharedParameters::Lock l(client->getParameters());
-            res = l.mParameters.updateRequestJpeg(&request);
-            if (res != OK) {
-                ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
-                        "capture request: %s (%d)", __FUNCTION__,
-                        client->getCameraId(),
-                        strerror(-res), res);
-                return res;
-            }
-        }
-
-        // Update post-processing settings
-        res = updateRequestWithDefaultStillRequest(request);
-        if (res != OK) {
-            ALOGW("%s: Unable to update post-processing tags, the reprocessed image quality "
-                    "may be compromised", __FUNCTION__);
-        }
-
-        mLatestCapturedRequest = request;
-        res = client->getCameraDevice()->capture(request);
-        if (res != OK ) {
-            ALOGE("%s: Unable to send ZSL reprocess request to capture: %s"
-                  " (%d)", __FUNCTION__, strerror(-res), res);
-            return res;
-        }
-
-        mState = LOCKED;
-    }
-
-    return OK;
-}
-
-status_t ZslProcessor3::clearZslQueue() {
-    Mutex::Autolock l(mInputMutex);
-    // If in middle of capture, can't clear out queue
-    if (mState == LOCKED) return OK;
-
-    return clearZslQueueLocked();
-}
-
-status_t ZslProcessor3::clearZslQueueLocked() {
-    if (mZslStream != 0) {
-        // clear result metadata list first.
-        clearZslResultQueueLocked();
-        return mZslStream->clearInputRingBuffer(&mLatestClearedBufferTimestamp);
-    }
-    return OK;
-}
-
-void ZslProcessor3::clearZslResultQueueLocked() {
-    mFrameList.clear();
-    mFrameListHead = 0;
-    mFrameList.insertAt(0, mFrameListDepth);
-}
-
-void ZslProcessor3::dump(int fd, const Vector<String16>& /*args*/) const {
-    Mutex::Autolock l(mInputMutex);
-    if (!mLatestCapturedRequest.isEmpty()) {
-        String8 result("    Latest ZSL capture request:\n");
-        write(fd, result.string(), result.size());
-        mLatestCapturedRequest.dump(fd, 2, 6);
-    } else {
-        String8 result("    Latest ZSL capture request: none yet\n");
-        write(fd, result.string(), result.size());
-    }
-    dumpZslQueue(fd);
-}
-
-bool ZslProcessor3::threadLoop() {
-    // TODO: remove dependency on thread. For now, shut thread down right
-    // away.
-    return false;
-}
-
-void ZslProcessor3::dumpZslQueue(int fd) const {
-    String8 header("ZSL queue contents:");
-    String8 indent("    ");
-    ALOGV("%s", header.string());
-    if (fd != -1) {
-        header = indent + header + "\n";
-        write(fd, header.string(), header.size());
-    }
-    for (size_t i = 0; i < mZslQueue.size(); i++) {
-        const ZslPair &queueEntry = mZslQueue[i];
-        nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
-        camera_metadata_ro_entry_t entry;
-        nsecs_t frameTimestamp = 0;
-        int frameAeState = -1;
-        if (!queueEntry.frame.isEmpty()) {
-            entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
-            if (entry.count > 0) frameTimestamp = entry.data.i64[0];
-            entry = queueEntry.frame.find(ANDROID_CONTROL_AE_STATE);
-            if (entry.count > 0) frameAeState = entry.data.u8[0];
-        }
-        String8 result =
-                String8::format("   %zu: b: %" PRId64 "\tf: %" PRId64 ", AE state: %d", i,
-                        bufferTimestamp, frameTimestamp, frameAeState);
-        ALOGV("%s", result.string());
-        if (fd != -1) {
-            result = indent + result + "\n";
-            write(fd, result.string(), result.size());
-        }
-
-    }
-}
-
-bool ZslProcessor3::isFixedFocusMode(uint8_t afMode) const {
-    switch (afMode) {
-        case ANDROID_CONTROL_AF_MODE_AUTO:
-        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
-        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
-        case ANDROID_CONTROL_AF_MODE_MACRO:
-            return false;
-        case ANDROID_CONTROL_AF_MODE_OFF:
-        case ANDROID_CONTROL_AF_MODE_EDOF:
-            return true;
-        default:
-            ALOGE("%s: unknown focus mode %d", __FUNCTION__, afMode);
-            return false;
-    }
-}
-
-nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
-    /**
-     * Find the smallest timestamp we know about so far
-     * - ensure that aeState is either converged or locked
-     */
-
-    size_t idx = 0;
-    nsecs_t minTimestamp = -1;
-
-    size_t emptyCount = mFrameList.size();
-
-    for (size_t j = 0; j < mFrameList.size(); j++) {
-        const CameraMetadata &frame = mFrameList[j];
-        if (!frame.isEmpty()) {
-
-            emptyCount--;
-
-            camera_metadata_ro_entry_t entry;
-            entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
-            if (entry.count == 0) {
-                ALOGE("%s: Can't find timestamp in frame!",
-                        __FUNCTION__);
-                continue;
-            }
-            nsecs_t frameTimestamp = entry.data.i64[0];
-            if (minTimestamp > frameTimestamp || minTimestamp == -1) {
-
-                entry = frame.find(ANDROID_CONTROL_AE_STATE);
-
-                if (entry.count == 0) {
-                    /**
-                     * This is most likely a HAL bug. The aeState field is
-                     * mandatory, so it should always be in a metadata packet.
-                     */
-                    ALOGW("%s: ZSL queue frame has no AE state field!",
-                            __FUNCTION__);
-                    continue;
-                }
-                if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
-                        entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
-                    ALOGVV("%s: ZSL queue frame AE state is %d, need "
-                           "full capture",  __FUNCTION__, entry.data.u8[0]);
-                    continue;
-                }
-
-                entry = frame.find(ANDROID_CONTROL_AF_MODE);
-                if (entry.count == 0) {
-                    ALOGW("%s: ZSL queue frame has no AF mode field!",
-                            __FUNCTION__);
-                    continue;
-                }
-                uint8_t afMode = entry.data.u8[0];
-                if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
-                    // Skip all the ZSL buffers for manual AF mode, as we don't
-                    // really know the AF state.
-                    continue;
-                }
-
-                // Check AF state if device has focuser and focus mode isn't fixed
-                if (mHasFocuser && !isFixedFocusMode(afMode)) {
-                    // Make sure the candidate frame has good focus.
-                    entry = frame.find(ANDROID_CONTROL_AF_STATE);
-                    if (entry.count == 0) {
-                        ALOGW("%s: ZSL queue frame has no AF state field!",
-                                __FUNCTION__);
-                        continue;
-                    }
-                    uint8_t afState = entry.data.u8[0];
-                    if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
-                            afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
-                            afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
-                        ALOGVV("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
-                                __FUNCTION__, afState);
-                        continue;
-                    }
-                }
-
-                minTimestamp = frameTimestamp;
-                idx = j;
-            }
-
-            ALOGVV("%s: Saw timestamp %" PRId64, __FUNCTION__, frameTimestamp);
-        }
-    }
-
-    if (emptyCount == mFrameList.size()) {
-        /**
-         * This could be mildly bad and means our ZSL was triggered before
-         * the camera framework received any frames.
-         *
-         * This is a fairly rare corner case which can happen when:
-         * + a user presses the shutter button very quickly when the camera starts
-         *     (startPreview followed immediately by takePicture).
-         * + in the burst capture case (hitting the shutter button as fast as possible)
-         *
-         * If this happens in the steady case (preview running for a while, then
-         *     a single takePicture) then this might be a framework bug.
-         */
-        ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__);
-    }
-
-    ALOGV("%s: Candidate timestamp %" PRId64 " (idx %zu), empty frames: %zu",
-          __FUNCTION__, minTimestamp, idx, emptyCount);
-
-    if (metadataIdx) {
-        *metadataIdx = idx;
-    }
-
-    return minTimestamp;
-}
-
-void ZslProcessor3::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
-    // Intentionally left empty
-    // Although theoretically we could use this to get better dump info
-}
-
-void ZslProcessor3::onBufferReleased(const BufferInfo& bufferInfo) {
-
-    // ignore output buffers
-    if (bufferInfo.mOutput) {
-        return;
-    }
-
-    // Lock mutex only once we know this is an input buffer returned to avoid
-    // potential deadlock
-    Mutex::Autolock l(mInputMutex);
-    // TODO: Verify that the buffer is in our queue by looking at timestamp
-    // theoretically unnecessary unless we change the following assumptions:
-    // -- only 1 buffer reprocessed at a time (which is the case now)
-
-    // Erase entire ZSL queue since we've now completed the capture and preview
-    // is stopped.
-    //
-    // We need to guarantee that if we do two back-to-back captures,
-    // the second won't use a buffer that's older than or the same as the first,
-    // which is theoretically possible if we don't clear out the queue and the
-    // selection criterion is something like 'newest'. Clearing out the result
-    // metadata queue on a completed capture ensures we'll only use new timestamps.
-    // Calling clearZslQueueLocked here would be a guaranteed deadlock: this callback
-    // holds the Camera3Stream internal lock (mLock), and clearZslQueueLocked needs
-    // to acquire the same lock.
-    // TODO: need to figure out a way to clear the ZSL buffer queue properly. Right
-    // now it is safe not to do so, as back-to-back ZSL captures require stopping
-    // and restarting the preview, which flushes the ZSL queue automatically.
-    ALOGV("%s: Memory optimization, clearing ZSL queue",
-          __FUNCTION__);
-    clearZslResultQueueLocked();
-
-    // Required so we accept more ZSL requests
-    mState = RUNNING;
-}
-
-}; // namespace camera2
-}; // namespace android
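Note: the frame-selection policy implemented by the deleted getCandidateTimestampLocked above distills to the sketch below. This is an illustrative reconstruction, not code from this change; the helper name is hypothetical, and the AF-state checks are omitted for brevity.

    // Illustrative distillation of the deleted ZSL candidate selection:
    // choose the oldest frame whose AE state permits reprocessing.
    static nsecs_t pickZslCandidate(const Vector<CameraMetadata>& frames,
            size_t* outIdx) {
        nsecs_t minTimestamp = -1;
        for (size_t i = 0; i < frames.size(); i++) {
            const CameraMetadata& frame = frames[i];
            if (frame.isEmpty()) continue;
            camera_metadata_ro_entry_t ts = frame.find(ANDROID_SENSOR_TIMESTAMP);
            camera_metadata_ro_entry_t ae = frame.find(ANDROID_CONTROL_AE_STATE);
            if (ts.count == 0 || ae.count == 0) continue;  // malformed result
            if (ae.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
                    ae.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
                continue;  // exposure not settled; needs a full capture instead
            }
            if (minTimestamp == -1 || ts.data.i64[0] < minTimestamp) {
                minTimestamp = ts.data.i64[0];  // oldest acceptable frame so far
                if (outIdx != NULL) *outIdx = i;
            }
        }
        return minTimestamp;  // -1 when no acceptable frame exists
    }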
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h b/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
deleted file mode 100644
index 2960478..0000000
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR3_H
-
-#include <utils/Thread.h>
-#include <utils/String16.h>
-#include <utils/Vector.h>
-#include <utils/Mutex.h>
-#include <utils/Condition.h>
-#include <gui/BufferItem.h>
-#include <gui/BufferItemConsumer.h>
-#include <camera/CameraMetadata.h>
-
-#include "api1/client2/FrameProcessor.h"
-#include "api1/client2/ZslProcessorInterface.h"
-#include "device3/Camera3ZslStream.h"
-
-namespace android {
-
-class Camera2Client;
-
-namespace camera2 {
-
-class CaptureSequencer;
-class Parameters;
-
-/***
- * ZSL queue processing
- */
-class ZslProcessor3 :
-                    public ZslProcessorInterface,
-                    public camera3::Camera3StreamBufferListener,
-            virtual public Thread,
-            virtual public FrameProcessor::FilteredListener {
-  public:
-    ZslProcessor3(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
-    ~ZslProcessor3();
-
-    // From FrameProcessor::FilteredListener
-    virtual void onResultAvailable(const CaptureResult &result);
-
-    /**
-     ****************************************
-     * ZslProcessorInterface implementation *
-     ****************************************
-     */
-
-    virtual status_t updateStream(const Parameters &params);
-    virtual status_t deleteStream();
-    virtual int getStreamId() const;
-
-    virtual status_t pushToReprocess(int32_t requestId);
-    virtual status_t clearZslQueue();
-
-    void dump(int fd, const Vector<String16>& args) const;
-
-  protected:
-    /**
-     **********************************************
-     * Camera3StreamBufferListener implementation *
-     **********************************************
-     */
-    typedef camera3::Camera3StreamBufferListener::BufferInfo BufferInfo;
-    // Buffer was acquired by the HAL
-    virtual void onBufferAcquired(const BufferInfo& bufferInfo);
-    // Buffer was released by the HAL
-    virtual void onBufferReleased(const BufferInfo& bufferInfo);
-
-  private:
-    static const nsecs_t kWaitDuration = 10000000; // 10 ms
-    nsecs_t mLatestClearedBufferTimestamp;
-
-    enum {
-        RUNNING,
-        LOCKED
-    } mState;
-
-    wp<Camera2Client> mClient;
-    wp<CaptureSequencer> mSequencer;
-
-    const int mId;
-
-    mutable Mutex mInputMutex;
-
-    enum {
-        NO_STREAM = -1
-    };
-
-    int mZslStreamId;
-    sp<camera3::Camera3ZslStream> mZslStream;
-
-    struct ZslPair {
-        BufferItem buffer;
-        CameraMetadata frame;
-    };
-
-    static const int32_t kDefaultMaxPipelineDepth = 4;
-    size_t mBufferQueueDepth;
-    size_t mFrameListDepth;
-    Vector<CameraMetadata> mFrameList;
-    size_t mFrameListHead;
-
-    ZslPair mNextPair;
-
-    Vector<ZslPair> mZslQueue;
-    size_t mZslQueueHead;
-    size_t mZslQueueTail;
-
-    CameraMetadata mLatestCapturedRequest;
-
-    bool mHasFocuser;
-
-    virtual bool threadLoop();
-
-    status_t clearZslQueueLocked();
-
-    void clearZslResultQueueLocked();
-
-    void dumpZslQueue(int fd) const;
-
-    nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
-
-    bool isFixedFocusMode(uint8_t afMode) const;
-
-    // Update the post-processing metadata with the default still capture request template
-    status_t updateRequestWithDefaultStillRequest(CameraMetadata &request) const;
-};
-
-
-}; //namespace camera2
-}; //namespace android
-
-#endif
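Note: the mFrameList/mFrameListHead/mFrameListDepth members above implement a fixed-depth ring of result metadata; clearZslResultQueueLocked in the .cpp resets it by re-inserting mFrameListDepth empty slots. A minimal sketch of the matching insertion pattern, assuming results were stored this way (the actual storage code lives in onResultAvailable, which is not shown in this hunk):

    // Hedged sketch of the fixed-depth metadata ring implied by the members above.
    static void pushResultFrame(Vector<CameraMetadata>& frameList, size_t& head,
            size_t depth, const CameraMetadata& frame) {
        frameList.editItemAt(head) = frame;  // overwrite the oldest slot in place
        head = (head + 1) % depth;           // advance the ring head
    }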
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h b/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
deleted file mode 100644
index 9e266e7..0000000
--- a/services/camera/libcameraservice/api1/client2/ZslProcessorInterface.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSORINTERFACE_H
-
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-#include <utils/String16.h>
-#include <utils/Vector.h>
-
-namespace android {
-namespace camera2 {
-
-class Parameters;
-
-class ZslProcessorInterface : virtual public RefBase {
-public:
-
-    // Get ID for use with android.request.outputStreams / inputStreams
-    virtual int getStreamId() const = 0;
-
-    // Update the streams by recreating them if the size/format has changed
-    virtual status_t updateStream(const Parameters& params) = 0;
-
-    // Delete the underlying CameraDevice streams
-    virtual status_t deleteStream() = 0;
-
-    // Clear any additional state necessary before the CameraDevice is disconnected
-    virtual status_t disconnect();
-
-    /**
-     * Submits a ZSL capture request (id = requestId)
-     *
-     * An appropriate ZSL buffer is selected by the closest timestamp,
-     * then we push that buffer to be reprocessed by the HAL.
-     * A capture request is created and submitted on behalf of the client.
-     */
-    virtual status_t pushToReprocess(int32_t requestId) = 0;
-
-    // Flush the ZSL buffer queue, freeing up all the buffers
-    virtual status_t clearZslQueue() = 0;
-
-    // (Debugging only) Dump the current state to the specified file descriptor
-    virtual void dump(int fd, const Vector<String16>& args) const = 0;
-};
-
-}; //namespace camera2
-}; //namespace android
-
-#endif
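Note: putting the contract above together, a caller was expected to drive an implementation roughly as follows. This is a minimal sketch with a hypothetical wrapper function, not code removed by this change.

    // Hypothetical end-to-end use of the deleted interface.
    static status_t runZslCapture(const sp<ZslProcessorInterface>& processor,
            const Parameters& params, int32_t requestId) {
        status_t res = processor->updateStream(params);  // (re)create the ZSL stream
        if (res != OK) return res;
        res = processor->pushToReprocess(requestId);     // select and submit a buffer
        if (res != OK) return res;
        return processor->clearZslQueue();               // free the queued buffers
    }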
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index bd9fea3..dbec34e 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -28,14 +28,23 @@
 #include "common/CameraDeviceBase.h"
 #include "api2/CameraDeviceClient.h"
 
+// Convenience macros for constructing binder::Status objects for error returns
 
+#define STATUS_ERROR(errorCode, errorString) \
+    binder::Status::fromServiceSpecificError(errorCode, \
+            String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
+
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+    binder::Status::fromServiceSpecificError(errorCode, \
+            String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, \
+                    __VA_ARGS__))
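Note: the two macros above bundle the calling function and line number into the error string. As a sketch, a call such as STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT, "Camera %d: no such stream", cameraId) -- illustrative code and message -- expands, in effect, to:

    binder::Status status = binder::Status::fromServiceSpecificError(
            CameraService::ERROR_ILLEGAL_ARGUMENT,
            String8::format("%s:%d: Camera %d: no such stream",
                    __FUNCTION__, __LINE__, cameraId));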
 
 namespace android {
 using namespace camera2;
 
 CameraDeviceClientBase::CameraDeviceClientBase(
         const sp<CameraService>& cameraService,
-        const sp<ICameraDeviceCallbacks>& remoteCallback,
+        const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
         const String16& clientPackageName,
         int cameraId,
         int cameraFacing,
@@ -56,16 +65,17 @@
 // Interface used by CameraService
 
 CameraDeviceClient::CameraDeviceClient(const sp<CameraService>& cameraService,
-                                   const sp<ICameraDeviceCallbacks>& remoteCallback,
-                                   const String16& clientPackageName,
-                                   int cameraId,
-                                   int cameraFacing,
-                                   int clientPid,
-                                   uid_t clientUid,
-                                   int servicePid) :
+        const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
+        const String16& clientPackageName,
+        int cameraId,
+        int cameraFacing,
+        int clientPid,
+        uid_t clientUid,
+        int servicePid) :
     Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
                 cameraId, cameraFacing, clientPid, clientUid, servicePid),
     mInputStream(),
+    mStreamingRequestId(REQUEST_ID_NONE),
     mRequestIdCounter(0) {
 
     ATRACE_CALL();
@@ -98,68 +108,77 @@
 CameraDeviceClient::~CameraDeviceClient() {
 }
 
-status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request,
-                                         bool streaming,
-                                         /*out*/
-                                         int64_t* lastFrameNumber) {
-    List<sp<CaptureRequest> > requestList;
-    requestList.push_back(request);
-    return submitRequestList(requestList, streaming, lastFrameNumber);
+binder::Status CameraDeviceClient::submitRequest(
+        const hardware::camera2::CaptureRequest& request,
+        bool streaming,
+        /*out*/
+        hardware::camera2::utils::SubmitInfo *submitInfo) {
+    std::vector<hardware::camera2::CaptureRequest> requestList = { request };
+    return submitRequestList(requestList, streaming, submitInfo);
 }
 
-status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > requests,
-                                               bool streaming, int64_t* lastFrameNumber) {
+binder::Status CameraDeviceClient::submitRequestList(
+        const std::vector<hardware::camera2::CaptureRequest>& requests,
+        bool streaming,
+        /*out*/
+        hardware::camera2::utils::SubmitInfo *submitInfo) {
     ATRACE_CALL();
     ALOGV("%s-start of function. Request list size %zu", __FUNCTION__, requests.size());
 
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res = binder::Status::ok();
+    status_t err;
+    if ( !(res = checkPidStatus(__FUNCTION__) ).isOk()) {
+        return res;
+    }
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    if (!mDevice.get()) return DEAD_OBJECT;
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
 
     if (requests.empty()) {
         ALOGE("%s: Camera %d: Sent null request. Rejecting request.",
               __FUNCTION__, mCameraId);
-        return BAD_VALUE;
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Empty request list");
     }
 
     List<const CameraMetadata> metadataRequestList;
-    int32_t requestId = mRequestIdCounter;
+    submitInfo->mRequestId = mRequestIdCounter;
     uint32_t loopCounter = 0;
 
-    for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); ++it) {
-        sp<CaptureRequest> request = *it;
-        if (request == 0) {
-            ALOGE("%s: Camera %d: Sent null request.",
-                    __FUNCTION__, mCameraId);
-            return BAD_VALUE;
-        } else if (request->mIsReprocess) {
+    for (auto&& request: requests) {
+        if (request.mIsReprocess) {
             if (!mInputStream.configured) {
                 ALOGE("%s: Camera %d: no input stream is configured.", __FUNCTION__, mCameraId);
-                return BAD_VALUE;
+                return STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                        "No input configured for camera %d but request is for reprocessing",
+                        mCameraId);
             } else if (streaming) {
                 ALOGE("%s: Camera %d: streaming reprocess requests not supported.", __FUNCTION__,
                         mCameraId);
-                return BAD_VALUE;
+                return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                        "Repeating reprocess requests not supported");
             }
         }
 
-        CameraMetadata metadata(request->mMetadata);
+        CameraMetadata metadata(request.mMetadata);
         if (metadata.isEmpty()) {
             ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.",
                    __FUNCTION__, mCameraId);
-            return BAD_VALUE;
-        } else if (request->mSurfaceList.isEmpty()) {
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                    "Request settings are empty");
+        } else if (request.mSurfaceList.isEmpty()) {
             ALOGE("%s: Camera %d: Requests must have at least one surface target. "
-                  "Rejecting request.", __FUNCTION__, mCameraId);
-            return BAD_VALUE;
+                    "Rejecting request.", __FUNCTION__, mCameraId);
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                    "Request has no output targets");
         }
 
         if (!enforceRequestPermissions(metadata)) {
             // Callee logs
-            return PERMISSION_DENIED;
+            return STATUS_ERROR(CameraService::ERROR_PERMISSION_DENIED,
+                    "Caller does not have permission to change restricted controls");
         }
 
         /**
@@ -167,9 +186,8 @@
          * the capture request's list of surface targets
          */
         Vector<int32_t> outputStreamIds;
-        outputStreamIds.setCapacity(request->mSurfaceList.size());
-        for (size_t i = 0; i < request->mSurfaceList.size(); ++i) {
-            sp<Surface> surface = request->mSurfaceList[i];
+        outputStreamIds.setCapacity(request.mSurfaceList.size());
+        for (sp<Surface> surface : request.mSurfaceList) {
             if (surface == 0) continue;
 
             sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
@@ -178,105 +196,124 @@
             // Trying to submit request with surface that wasn't created
             if (idx == NAME_NOT_FOUND) {
                 ALOGE("%s: Camera %d: Tried to submit a request with a surface that"
-                      " we have not called createStream on",
-                      __FUNCTION__, mCameraId);
-                return BAD_VALUE;
+                        " we have not called createStream on",
+                        __FUNCTION__, mCameraId);
+                return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                        "Request targets Surface that is not part of current capture session");
             }
 
             int streamId = mStreamMap.valueAt(idx);
             outputStreamIds.push_back(streamId);
             ALOGV("%s: Camera %d: Appending output stream %d to request",
-                  __FUNCTION__, mCameraId, streamId);
+                    __FUNCTION__, mCameraId, streamId);
         }
 
         metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
                         outputStreamIds.size());
 
-        if (request->mIsReprocess) {
+        if (request.mIsReprocess) {
             metadata.update(ANDROID_REQUEST_INPUT_STREAMS, &mInputStream.id, 1);
         }
 
-        metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
+        metadata.update(ANDROID_REQUEST_ID, &(submitInfo->mRequestId), /*size*/1);
         loopCounter++; // loopCounter starts from 1
         ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)",
-              __FUNCTION__, mCameraId, requestId, loopCounter, requests.size());
+              __FUNCTION__, mCameraId, submitInfo->mRequestId, loopCounter, requests.size());
 
         metadataRequestList.push_back(metadata);
     }
     mRequestIdCounter++;
 
     if (streaming) {
-        res = mDevice->setStreamingRequestList(metadataRequestList, lastFrameNumber);
-        if (res != OK) {
-            ALOGE("%s: Camera %d:  Got error %d after trying to set streaming "
-                  "request", __FUNCTION__, mCameraId, res);
+        err = mDevice->setStreamingRequestList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+        if (err != OK) {
+            String8 msg = String8::format(
+                "Camera %d:  Got error %s (%d) after trying to set streaming request",
+                mCameraId, strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
+                    msg.string());
         } else {
-            mStreamingRequestList.push_back(requestId);
+            Mutex::Autolock idLock(mStreamingRequestIdLock);
+            mStreamingRequestId = submitInfo->mRequestId;
         }
     } else {
-        res = mDevice->captureList(metadataRequestList, lastFrameNumber);
-        if (res != OK) {
-            ALOGE("%s: Camera %d: Got error %d after trying to set capture",
-                __FUNCTION__, mCameraId, res);
+        err = mDevice->captureList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+        if (err != OK) {
+            String8 msg = String8::format(
+                "Camera %d: Got error %s (%d) after trying to submit capture request",
+                mCameraId, strerror(-err), err);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
+                    msg.string());
         }
-        ALOGV("%s: requestId = %d ", __FUNCTION__, requestId);
+        ALOGV("%s: requestId = %d ", __FUNCTION__, submitInfo->mRequestId);
     }
 
     ALOGV("%s: Camera %d: End of function", __FUNCTION__, mCameraId);
-    if (res == OK) {
-        return requestId;
-    }
-
     return res;
 }
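Note: from the binder client's perspective, the reworked submit path above returns both outputs through SubmitInfo and signals failures through service-specific status codes. A hedged usage sketch; the `device` proxy and `requests` vector are assumed to exist:

    // Hypothetical caller-side view of the new submit API.
    hardware::camera2::utils::SubmitInfo info;
    binder::Status status = device->submitRequestList(requests,
            /*streaming*/ false, &info);
    if (!status.isOk()) {
        if (status.serviceSpecificErrorCode() == CameraService::ERROR_DISCONNECTED) {
            // The device went away; tear the session down.
        } else {
            ALOGE("submitRequestList failed: %s", status.toString8().string());
        }
    } else {
        ALOGV("Submitted request %d, last frame number %" PRId64,
                info.mRequestId, info.mLastFrameNumber);
    }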
 
-status_t CameraDeviceClient::cancelRequest(int requestId, int64_t* lastFrameNumber) {
+binder::Status CameraDeviceClient::cancelRequest(
+        int requestId,
+        /*out*/
+        int64_t* lastFrameNumber) {
     ATRACE_CALL();
     ALOGV("%s, requestId = %d", __FUNCTION__, requestId);
 
-    status_t res;
+    status_t err;
+    binder::Status res;
 
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    Vector<int>::iterator it, end;
-    for (it = mStreamingRequestList.begin(), end = mStreamingRequestList.end();
-         it != end; ++it) {
-        if (*it == requestId) {
-            break;
-        }
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
     }
 
-    if (it == end) {
-        ALOGE("%s: Camera%d: Did not find request id %d in list of streaming "
-              "requests", __FUNCTION__, mCameraId, requestId);
-        return BAD_VALUE;
+    Mutex::Autolock idLock(mStreamingRequestIdLock);
+    if (mStreamingRequestId != requestId) {
+        String8 msg = String8::format("Camera %d: Canceling request ID %d doesn't match "
+                "current request ID %d", mCameraId, requestId, mStreamingRequestId);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
-    res = mDevice->clearStreamingRequest(lastFrameNumber);
+    err = mDevice->clearStreamingRequest(lastFrameNumber);
 
-    if (res == OK) {
+    if (err == OK) {
         ALOGV("%s: Camera %d: Successfully cleared streaming request",
               __FUNCTION__, mCameraId);
-        mStreamingRequestList.erase(it);
+        mStreamingRequestId = REQUEST_ID_NONE;
+    } else {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error clearing streaming request: %s (%d)",
+                mCameraId, strerror(-err), err);
     }
 
     return res;
 }
 
-status_t CameraDeviceClient::beginConfigure() {
+binder::Status CameraDeviceClient::beginConfigure() {
     // TODO: Implement this.
     ALOGV("%s: Not implemented yet.", __FUNCTION__);
-    return OK;
+    return binder::Status::ok();
 }
 
-status_t CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
+binder::Status CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
     ALOGV("%s: ending configure (%d input stream, %zu output streams)",
             __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());
 
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
+
     // Sanitize the high speed session against necessary capability bit.
     if (isConstrainedHighSpeed) {
         CameraMetadata staticInfo = mDevice->info();
@@ -290,33 +327,41 @@
             }
         }
         if (!isConstrainedHighSpeedSupported) {
-            ALOGE("%s: Camera %d: Try to create a constrained high speed configuration on a device"
-                    " that doesn't support it.",
-                          __FUNCTION__, mCameraId);
-            return INVALID_OPERATION;
+            String8 msg = String8::format(
+                "Camera %d: Try to create a constrained high speed configuration on a device"
+                " that doesn't support it.", mCameraId);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                    msg.string());
         }
     }
 
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    status_t err = mDevice->configureStreams(isConstrainedHighSpeed);
+    if (err == BAD_VALUE) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "Camera %d: Unsupported set of inputs/outputs provided",
+                mCameraId);
+    } else if (err != OK) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error configuring streams: %s (%d)",
+                mCameraId, strerror(-err), err);
+    }
 
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    return mDevice->configureStreams(isConstrainedHighSpeed);
+    return res;
 }
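Note: the capability scan elided from the endConfigure hunk above walks ANDROID_REQUEST_AVAILABLE_CAPABILITIES in the device's static metadata. A plausible reconstruction of that loop, for orientation only:

    // Reconstruction of the elided constrained high-speed capability check.
    bool isConstrainedHighSpeedSupported = false;
    camera_metadata_ro_entry_t entry =
            staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
    for (size_t i = 0; i < entry.count; i++) {
        if (entry.data.u8[i] ==
                ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO) {
            isConstrainedHighSpeedSupported = true;
            break;
        }
    }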
 
-status_t CameraDeviceClient::deleteStream(int streamId) {
+binder::Status CameraDeviceClient::deleteStream(int streamId) {
     ATRACE_CALL();
     ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId);
 
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    if (!mDevice.get()) return DEAD_OBJECT;
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
 
     bool isInput = false;
     ssize_t index = NAME_NOT_FOUND;
@@ -333,20 +378,22 @@
         }
 
         if (index == NAME_NOT_FOUND) {
-            ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
-                  "created yet", __FUNCTION__, mCameraId, streamId);
-            return BAD_VALUE;
+            String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no such "
+                    "stream created yet", mCameraId, streamId);
+            ALOGW("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
         }
     }
 
     // Also returns BAD_VALUE if stream ID was not valid
-    res = mDevice->deleteStream(streamId);
+    status_t err = mDevice->deleteStream(streamId);
 
-    if (res == BAD_VALUE) {
-        ALOGE("%s: Camera %d: Unexpected BAD_VALUE when deleting stream, but we"
-              " already checked and the stream ID (%d) should be valid.",
-              __FUNCTION__, mCameraId, streamId);
-    } else if (res == OK) {
+    if (err != OK) {
+        String8 msg = String8::format("Camera %d: Unexpected error %s (%d) when deleting stream %d",
+                mCameraId, strerror(-err), err, streamId);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+    } else {
         if (isInput) {
             mInputStream.configured = false;
         } else {
@@ -357,44 +404,50 @@
     return res;
 }
 
-status_t CameraDeviceClient::createStream(const OutputConfiguration &outputConfiguration)
-{
+binder::Status CameraDeviceClient::createStream(
+        const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+        /*out*/
+        int32_t* newStreamId) {
     ATRACE_CALL();
 
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-
     sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
     if (bufferProducer == NULL) {
         ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
-        return BAD_VALUE;
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
     }
-    if (!mDevice.get()) return DEAD_OBJECT;
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
 
     // Don't create multiple streams for the same target surface
     {
         ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
         if (index != NAME_NOT_FOUND) {
-            ALOGW("%s: Camera %d: Buffer producer already has a stream for it "
-                  "(ID %zd)",
-                  __FUNCTION__, mCameraId, index);
-            return ALREADY_EXISTS;
+            String8 msg = String8::format("Camera %d: Surface already has a stream created for it "
+                    "(ID %zd)", mCameraId, index);
+            ALOGW("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
         }
     }
 
+    status_t err;
+
     // HACK b/10949105
     // Query consumer usage bits to set async operation mode for
     // GLConsumer using controlledByApp parameter.
     bool useAsync = false;
     int32_t consumerUsage;
-    if ((res = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+    if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
             &consumerUsage)) != OK) {
-        ALOGE("%s: Camera %d: Failed to query consumer usage", __FUNCTION__,
-              mCameraId);
-        return res;
+        String8 msg = String8::format("Camera %d: Failed to query Surface consumer usage: %s (%d)",
+                mCameraId, strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
     if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
         ALOGW("%s: Camera %d: Forcing asynchronous mode for stream",
@@ -417,26 +470,30 @@
     int width, height, format;
     android_dataspace dataSpace;
 
-    if ((res = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
-        ALOGE("%s: Camera %d: Failed to query Surface width", __FUNCTION__,
-              mCameraId);
-        return res;
+    if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
+        String8 msg = String8::format("Camera %d: Failed to query Surface width: %s (%d)",
+                mCameraId, strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
-    if ((res = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
-        ALOGE("%s: Camera %d: Failed to query Surface height", __FUNCTION__,
-              mCameraId);
-        return res;
+    if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+        String8 msg = String8::format("Camera %d: Failed to query Surface height: %s (%d)",
+                mCameraId, strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
-    if ((res = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
-        ALOGE("%s: Camera %d: Failed to query Surface format", __FUNCTION__,
-              mCameraId);
-        return res;
+    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+        String8 msg = String8::format("Camera %d: Failed to query Surface format: %s (%d)",
+                mCameraId, strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
-    if ((res = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
                             reinterpret_cast<int*>(&dataSpace))) != OK) {
-        ALOGE("%s: Camera %d: Failed to query Surface dataSpace", __FUNCTION__,
-              mCameraId);
-        return res;
+        String8 msg = String8::format("Camera %d: Failed to query Surface dataspace: %s (%d)",
+                mCameraId, strerror(-err), err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
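Note: the four queries above repeat one pattern -- query the ANativeWindow property, and on failure log the errno-style code and wrap it in a service-specific status. A condensed sketch using a hypothetical helper:

    // Hypothetical helper condensing the repeated Surface property queries.
    static binder::Status queryOrError(ANativeWindow* anw, int what, int* value,
            int cameraId, const char* propName) {
        status_t err = anw->query(anw, what, value);
        if (err != OK) {
            String8 msg = String8::format("Camera %d: Failed to query Surface %s: %s (%d)",
                    cameraId, propName, strerror(-err), err);
            ALOGE("%s: %s", __FUNCTION__, msg.string());
            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
        }
        return binder::Status::ok();
    }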
 
     // FIXME: remove this override since the default format should be
@@ -451,18 +508,22 @@
     // Round dimensions to the nearest dimensions available for this format
     if (flexibleConsumer && !CameraDeviceClient::roundBufferDimensionNearest(width, height,
             format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
-        ALOGE("%s: No stream configurations with the format %#x defined, failed to create stream.",
-                __FUNCTION__, format);
-        return BAD_VALUE;
+        String8 msg = String8::format("Camera %d: No supported stream configurations with "
+                "format %#x defined, failed to create output stream", mCameraId, format);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
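Note: roundBufferDimensionNearest, used just above for flexible consumers, snaps the requested size to the closest supported one. An illustrative nearest-by-area sketch, assuming the supported sizes for the format have already been extracted from the static stream configurations:

    // Illustrative nearest-size rounding; not the exact implementation.
    static bool roundToNearestSize(int32_t w, int32_t h,
            const std::vector<std::pair<int32_t, int32_t>>& supported,
            int32_t* outW, int32_t* outH) {
        bool found = false;
        int64_t bestDelta = 0;
        const int64_t target = (int64_t)w * h;
        for (const auto& size : supported) {
            int64_t area = (int64_t)size.first * size.second;
            int64_t delta = (area > target) ? (area - target) : (target - area);
            if (!found || delta < bestDelta) {
                bestDelta = delta;
                *outW = size.first;
                *outH = size.second;
                found = true;
            }
        }
        return found;  // false when no configurations exist for this format
    }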
 
-    int streamId = -1;
-    res = mDevice->createStream(surface, width, height, format, dataSpace,
-                                static_cast<camera3_stream_rotation_t>
-                                        (outputConfiguration.getRotation()),
-                                &streamId);
+    int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
+    err = mDevice->createStream(surface, width, height, format, dataSpace,
+            static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+            &streamId, outputConfiguration.getSurfaceSetID());
 
-    if (res == OK) {
+    if (err != OK) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
+                mCameraId, width, height, format, dataSpace, strerror(-err), err);
+    } else {
         mStreamMap.add(binder, streamId);
 
         ALOGV("%s: Camera %d: Successfully created a new stream ID %d",
@@ -473,49 +534,56 @@
          * rotate the camera stream for preview use cases.
          */
         int32_t transform = 0;
-        res = getRotationTransformLocked(&transform);
+        err = getRotationTransformLocked(&transform);
 
-        if (res != OK) {
+        if (err != OK) {
             // Error logged by getRotationTransformLocked.
-            return res;
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
+                    "Unable to calculate rotation transform for new stream");
         }
 
-        res = mDevice->setStreamTransform(streamId, transform);
-        if (res != OK) {
-            ALOGE("%s: Failed to set stream transform (stream id %d)",
-                  __FUNCTION__, streamId);
-            return res;
+        err = mDevice->setStreamTransform(streamId, transform);
+        if (err != OK) {
+            String8 msg = String8::format("Failed to set stream transform (stream id %d)",
+                    streamId);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
         }
 
-        return streamId;
+        *newStreamId = streamId;
     }
 
     return res;
 }
 
 
-status_t CameraDeviceClient::createInputStream(int width, int height,
-        int format) {
+binder::Status CameraDeviceClient::createInputStream(
+        int width, int height, int format,
+        /*out*/
+        int32_t* newStreamId) {
 
     ATRACE_CALL();
     ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
 
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
-    if (!mDevice.get()) return DEAD_OBJECT;
+
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
 
     if (mInputStream.configured) {
-        ALOGE("%s: Camera %d: Already has an input stream "
-                " configuration. (ID %zd)", __FUNCTION__, mCameraId,
-                mInputStream.id);
-        return ALREADY_EXISTS;
+        String8 msg = String8::format("Camera %d: Already has an input stream "
+                "configured (ID %zd)", mCameraId, mInputStream.id);
+        ALOGE("%s: %s", __FUNCTION__, msg.string() );
+        return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
     }
 
     int streamId = -1;
-    res = mDevice->createInputStream(width, height, format, &streamId);
-    if (res == OK) {
+    status_t err = mDevice->createInputStream(width, height, format, &streamId);
+    if (err == OK) {
         mInputStream.configured = true;
         mInputStream.width = width;
         mInputStream.height = height;
@@ -523,27 +591,42 @@
         mInputStream.id = streamId;
 
         ALOGV("%s: Camera %d: Successfully created a new input stream ID %d",
-              __FUNCTION__, mCameraId, streamId);
+                __FUNCTION__, mCameraId, streamId);
 
-        return streamId;
+        *newStreamId = streamId;
+    } else {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error creating new input stream: %s (%d)", mCameraId,
+                strerror(-err), err);
     }
 
     return res;
 }
 
-status_t CameraDeviceClient::getInputBufferProducer(
-        /*out*/sp<IGraphicBufferProducer> *producer) {
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+binder::Status CameraDeviceClient::getInputSurface(/*out*/ view::Surface *inputSurface) {
 
-    if (producer == NULL) {
-        return BAD_VALUE;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+
+    if (inputSurface == NULL) {
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Null input surface");
     }
 
     Mutex::Autolock icl(mBinderSerializationLock);
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    return mDevice->getInputBufferProducer(producer);
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
+    sp<IGraphicBufferProducer> producer;
+    status_t err = mDevice->getInputBufferProducer(&producer);
+    if (err != OK) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error getting input Surface: %s (%d)",
+                mCameraId, strerror(-err), err);
+    } else {
+        inputSurface->name = String16("CameraInput");
+        inputSurface->graphicBufferProducer = producer;
+    }
+    return res;
 }
 
 bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
@@ -604,42 +687,57 @@
 }
 
 // Create a request object from a template.
-status_t CameraDeviceClient::createDefaultRequest(int templateId,
-                                                  /*out*/
-                                                  CameraMetadata* request)
+binder::Status CameraDeviceClient::createDefaultRequest(int templateId,
+        /*out*/
+        hardware::camera2::impl::CameraMetadataNative* request)
 {
     ATRACE_CALL();
     ALOGV("%s (templateId = 0x%x)", __FUNCTION__, templateId);
 
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    if (!mDevice.get()) return DEAD_OBJECT;
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
 
     CameraMetadata metadata;
-    if ( (res = mDevice->createDefaultRequest(templateId, &metadata) ) == OK &&
+    status_t err;
+    if ( (err = mDevice->createDefaultRequest(templateId, &metadata) ) == OK &&
         request != NULL) {
 
         request->swap(metadata);
-    }
+    } else if (err == BAD_VALUE) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "Camera %d: Template ID %d is invalid or not supported: %s (%d)",
+                mCameraId, templateId, strerror(-err), err);
 
+    } else {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error creating default request for template %d: %s (%d)",
+                mCameraId, templateId, strerror(-err), err);
+    }
     return res;
 }
 
-status_t CameraDeviceClient::getCameraInfo(/*out*/CameraMetadata* info)
+binder::Status CameraDeviceClient::getCameraInfo(
+        /*out*/
+        hardware::camera2::impl::CameraMetadataNative* info)
 {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
-    status_t res = OK;
+    binder::Status res;
 
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    if (!mDevice.get()) return DEAD_OBJECT;
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
 
     if (info != NULL) {
         *info = mDevice->info(); // static camera metadata
@@ -649,51 +747,70 @@
     return res;
 }
 
-status_t CameraDeviceClient::waitUntilIdle()
+binder::Status CameraDeviceClient::waitUntilIdle()
 {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
-    status_t res = OK;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    if (!mDevice.get()) return DEAD_OBJECT;
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
 
     // FIXME: Also need check repeating burst.
-    if (!mStreamingRequestList.isEmpty()) {
-        ALOGE("%s: Camera %d: Try to waitUntilIdle when there are active streaming requests",
-              __FUNCTION__, mCameraId);
-        return INVALID_OPERATION;
+    Mutex::Autolock idLock(mStreamingRequestIdLock);
+    if (mStreamingRequestId != REQUEST_ID_NONE) {
+        String8 msg = String8::format(
+            "Camera %d: Try to waitUntilIdle when there are active streaming requests",
+            mCameraId);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
     }
-    res = mDevice->waitUntilDrained();
+    status_t err = mDevice->waitUntilDrained();
+    if (err != OK) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error waiting to drain: %s (%d)",
+                mCameraId, strerror(-err), err);
+    }
     ALOGV("%s Done", __FUNCTION__);
-
     return res;
 }
 
-status_t CameraDeviceClient::flush(int64_t* lastFrameNumber) {
+binder::Status CameraDeviceClient::flush(
+        /*out*/
+        int64_t* lastFrameNumber) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
-    status_t res = OK;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
-    if (!mDevice.get()) return DEAD_OBJECT;
+    if (!mDevice.get()) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+    }
 
-    mStreamingRequestList.clear();
-    return mDevice->flush(lastFrameNumber);
+    Mutex::Autolock idLock(mStreamingRequestIdLock);
+    mStreamingRequestId = REQUEST_ID_NONE;
+    status_t err = mDevice->flush(lastFrameNumber);
+    if (err != OK) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error flushing device: %s (%d)", mCameraId, strerror(-err), err);
+    }
+    return res;
 }
 
-status_t CameraDeviceClient::prepare(int streamId) {
+binder::Status CameraDeviceClient::prepare(int streamId) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
-    status_t res = OK;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
@@ -707,24 +824,33 @@
     }
 
     if (index == NAME_NOT_FOUND) {
-        ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
-              "created yet", __FUNCTION__, mCameraId, streamId);
-        return BAD_VALUE;
+        String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
+              "with that ID exists", mCameraId, streamId);
+        ALOGW("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
     // Also returns BAD_VALUE if stream ID was not valid, or stream already
     // has been used
-    res = mDevice->prepare(streamId);
-
+    status_t err = mDevice->prepare(streamId);
+    if (err == BAD_VALUE) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "Camera %d: Stream %d has already been used, and cannot be prepared",
+                mCameraId, streamId);
+    } else if (err != OK) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error preparing stream %d: %s (%d)", mCameraId, streamId,
+                strerror(-err), err);
+    }
     return res;
 }
 
-status_t CameraDeviceClient::prepare2(int maxCount, int streamId) {
+binder::Status CameraDeviceClient::prepare2(int maxCount, int streamId) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
-    status_t res = OK;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
@@ -738,30 +864,41 @@
     }
 
     if (index == NAME_NOT_FOUND) {
-        ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream created yet",
-                __FUNCTION__, mCameraId, streamId);
-        return BAD_VALUE;
+        String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
+              "with that ID exists", mCameraId, streamId);
+        ALOGW("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
     if (maxCount <= 0) {
-        ALOGE("%s: Camera %d: Invalid maxCount (%d) specified, must be greater than 0.",
-                __FUNCTION__, mCameraId, maxCount);
-        return BAD_VALUE;
+        String8 msg = String8::format("Camera %d: maxCount (%d) must be greater than 0",
+                mCameraId, maxCount);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
     // Also returns BAD_VALUE if stream ID was not valid, or stream already
     // has been used
-    res = mDevice->prepare(maxCount, streamId);
+    status_t err = mDevice->prepare(maxCount, streamId);
+    if (err == BAD_VALUE) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "Camera %d: Stream %d has already been used, and cannot be prepared",
+                mCameraId, streamId);
+    } else if (err != OK) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error preparing stream %d: %s (%d)", mCameraId, streamId,
+                strerror(-err), err);
+    }
 
     return res;
 }
 
-status_t CameraDeviceClient::tearDown(int streamId) {
+binder::Status CameraDeviceClient::tearDown(int streamId) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
 
-    status_t res = OK;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
@@ -775,14 +912,24 @@
     }
 
     if (index == NAME_NOT_FOUND) {
-        ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
-              "created yet", __FUNCTION__, mCameraId, streamId);
-        return BAD_VALUE;
+        String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
+              "with that ID exists", mCameraId, streamId);
+        ALOGW("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
     }
 
     // Also returns BAD_VALUE if stream ID was not valid or if the stream is in
     // use
-    res = mDevice->tearDown(streamId);
+    status_t err = mDevice->tearDown(streamId);
+    if (err == BAD_VALUE) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+                "Camera %d: Stream %d is still in use, cannot be torn down",
+                mCameraId, streamId);
+    } else if (err != OK) {
+        res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+                "Camera %d: Error tearing down stream %d: %s (%d)", mCameraId, streamId,
+                strerror(-err), err);
+    }
 
     return res;
 }
@@ -822,19 +969,30 @@
     return dumpDevice(fd, args);
 }
 
-void CameraDeviceClient::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+void CameraDeviceClient::notifyError(int32_t errorCode,
                                      const CaptureResultExtras& resultExtras) {
     // Thread safe. Don't bother locking.
-    sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+    sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
 
     if (remoteCb != 0) {
         remoteCb->onDeviceError(errorCode, resultExtras);
     }
 }
 
+void CameraDeviceClient::notifyRepeatingRequestError(long lastFrameNumber) {
+    sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+
+    if (remoteCb != 0) {
+        remoteCb->onRepeatingRequestError(lastFrameNumber);
+    }
+
+    Mutex::Autolock idLock(mStreamingRequestIdLock);
+    mStreamingRequestId = REQUEST_ID_NONE;
+}
+
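Note: the new notifyRepeatingRequestError path above tells the client that its repeating request ended and resets the service-side streaming ID. A hedged sketch of the matching client-side callback; the class name is hypothetical and the signature is inferred from the call above:

    // Hypothetical client-side implementation of the new callback.
    binder::Status MyDeviceCallbacks::onRepeatingRequestError(int64_t lastFrameNumber) {
        // The service stopped the repeating request on its own; record where it
        // ended so in-flight results can be accounted for.
        ALOGW("Repeating request stopped after frame %" PRId64, lastFrameNumber);
        return binder::Status::ok();
    }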
 void CameraDeviceClient::notifyIdle() {
     // Thread safe. Don't bother locking.
-    sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+    sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
 
     if (remoteCb != 0) {
         remoteCb->onDeviceIdle();
@@ -845,7 +1003,7 @@
 void CameraDeviceClient::notifyShutter(const CaptureResultExtras& resultExtras,
         nsecs_t timestamp) {
     // Thread safe. Don't bother locking.
-    sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+    sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
     if (remoteCb != 0) {
         remoteCb->onCaptureStarted(resultExtras, timestamp);
     }
@@ -854,7 +1012,7 @@
 
 void CameraDeviceClient::notifyPrepared(int streamId) {
     // Thread safe. Don't bother locking.
-    sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+    sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
     if (remoteCb != 0) {
         remoteCb->onPrepared(streamId);
     }
@@ -893,12 +1051,23 @@
     ALOGV("%s", __FUNCTION__);
 
     // Thread-safe. No lock necessary.
-    sp<ICameraDeviceCallbacks> remoteCb = mRemoteCallback;
+    sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = mRemoteCallback;
     if (remoteCb != NULL) {
         remoteCb->onResultReceived(result.mMetadata, result.mResultExtras);
     }
 }
 
+binder::Status CameraDeviceClient::checkPidStatus(const char* checkLocation) {
+    if (mDisconnected) {
+        return STATUS_ERROR(CameraService::ERROR_DISCONNECTED,
+                "The camera device has been disconnected");
+    }
+    status_t res = checkPid(checkLocation);
+    return (res == OK) ? binder::Status::ok() :
+            STATUS_ERROR(CameraService::ERROR_PERMISSION_DENIED,
+                    "Attempt to use camera from a different process than original client");
+}
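+
+// A minimal sketch of the calling pattern shared by the binder::Status entry
+// points above; exampleCall and doWork are hypothetical, and the error mapping
+// mirrors prepare()/tearDown():
+//
+//     binder::Status CameraDeviceClient::exampleCall(int streamId) {
+//         binder::Status res;
+//         // Reject calls from the wrong process or after disconnect
+//         if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+//         Mutex::Autolock icl(mBinderSerializationLock);
+//         status_t err = mDevice->doWork(streamId);
+//         if (err != OK) {
+//             res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+//                     "Camera %d: Error in stream %d: %s (%d)",
+//                     mCameraId, streamId, strerror(-err), err);
+//         }
+//         return res;
+//     }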
+
 // TODO: move to Camera2ClientBase
 bool CameraDeviceClient::enforceRequestPermissions(CameraMetadata& metadata) {
 
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index b1d1762..d792b7d 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -17,9 +17,10 @@
 #ifndef ANDROID_SERVERS_CAMERA_PHOTOGRAPHY_CAMERADEVICECLIENT_H
 #define ANDROID_SERVERS_CAMERA_PHOTOGRAPHY_CAMERADEVICECLIENT_H
 
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
+#include <android/hardware/camera2/BnCameraDeviceUser.h>
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
 #include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/SubmitInfo.h>
 
 #include "CameraService.h"
 #include "common/FrameProcessorBase.h"
@@ -27,17 +28,19 @@
 
 namespace android {
 
-struct CameraDeviceClientBase : public CameraService::BasicClient, public BnCameraDeviceUser
+struct CameraDeviceClientBase :
+         public CameraService::BasicClient,
+         public hardware::camera2::BnCameraDeviceUser
 {
-    typedef ICameraDeviceCallbacks TCamCallbacks;
+    typedef hardware::camera2::ICameraDeviceCallbacks TCamCallbacks;
 
-    const sp<ICameraDeviceCallbacks>& getRemoteCallback() {
+    const sp<hardware::camera2::ICameraDeviceCallbacks>& getRemoteCallback() {
         return mRemoteCallback;
     }
 
 protected:
     CameraDeviceClientBase(const sp<CameraService>& cameraService,
-            const sp<ICameraDeviceCallbacks>& remoteCallback,
+            const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
             const String16& clientPackageName,
             int cameraId,
             int cameraFacing,
@@ -45,7 +48,7 @@
             uid_t clientUid,
             int servicePid);
 
-    sp<ICameraDeviceCallbacks> mRemoteCallback;
+    sp<hardware::camera2::ICameraDeviceCallbacks> mRemoteCallback;
 };
 
 /**
@@ -63,66 +66,77 @@
      */
 
     // Note that the callee gets a copy of the metadata.
-    virtual status_t           submitRequest(sp<CaptureRequest> request,
-                                             bool streaming = false,
-                                             /*out*/
-                                             int64_t* lastFrameNumber = NULL);
+    virtual binder::Status submitRequest(
+            const hardware::camera2::CaptureRequest& request,
+            bool streaming = false,
+            /*out*/
+            hardware::camera2::utils::SubmitInfo *submitInfo = nullptr);
     // The list of requests is copied.
-    virtual status_t           submitRequestList(List<sp<CaptureRequest> > requests,
-                                                 bool streaming = false,
-                                                 /*out*/
-                                                 int64_t* lastFrameNumber = NULL);
-    virtual status_t      cancelRequest(int requestId,
-                                        /*out*/
-                                        int64_t* lastFrameNumber = NULL);
+    virtual binder::Status submitRequestList(
+            const std::vector<hardware::camera2::CaptureRequest>& requests,
+            bool streaming = false,
+            /*out*/
+            hardware::camera2::utils::SubmitInfo *submitInfo = nullptr);
+    virtual binder::Status cancelRequest(int requestId,
+            /*out*/
+            int64_t* lastFrameNumber = NULL);
 
-    virtual status_t beginConfigure();
+    virtual binder::Status beginConfigure();
 
-    virtual status_t endConfigure(bool isConstrainedHighSpeed = false);
+    virtual binder::Status endConfigure(bool isConstrainedHighSpeed = false);
 
     // Returns an error Status if the device is not idle
-    virtual status_t      deleteStream(int streamId);
+    virtual binder::Status deleteStream(int streamId);
 
-    virtual status_t      createStream(const OutputConfiguration &outputConfiguration);
+    virtual binder::Status createStream(
+            const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+            /*out*/
+            int32_t* newStreamId = NULL);
 
     // Create an input stream of width, height, and format.
-    virtual status_t      createInputStream(int width, int height, int format);
+    virtual binder::Status createInputStream(int width, int height, int format,
+            /*out*/
+            int32_t* newStreamId = NULL);
 
     // Get the buffer producer of the input stream
-    virtual status_t      getInputBufferProducer(
-                                /*out*/sp<IGraphicBufferProducer> *producer);
+    virtual binder::Status getInputSurface(
+            /*out*/
+            view::Surface *inputSurface);
 
     // Create a request object from a template.
-    virtual status_t      createDefaultRequest(int templateId,
-                                               /*out*/
-                                               CameraMetadata* request);
+    virtual binder::Status createDefaultRequest(int templateId,
+            /*out*/
+            hardware::camera2::impl::CameraMetadataNative* request);
 
     // Get the static metadata for the camera
     // -- Caller owns the newly allocated metadata
-    virtual status_t      getCameraInfo(/*out*/CameraMetadata* info);
+    virtual binder::Status getCameraInfo(
+            /*out*/
+            hardware::camera2::impl::CameraMetadataNative* cameraCharacteristics);
 
     // Wait until all the submitted requests have finished processing
-    virtual status_t      waitUntilIdle();
+    virtual binder::Status waitUntilIdle();
 
     // Flush all active and pending requests as fast as possible
-    virtual status_t      flush(/*out*/
-                                int64_t* lastFrameNumber = NULL);
+    virtual binder::Status flush(
+            /*out*/
+            int64_t* lastFrameNumber = NULL);
 
     // Prepare stream by preallocating its buffers
-    virtual status_t      prepare(int streamId);
+    virtual binder::Status prepare(int32_t streamId);
 
     // Tear down stream resources by freeing its unused buffers
-    virtual status_t      tearDown(int streamId);
+    virtual binder::Status tearDown(int32_t streamId);
 
     // Prepare stream by preallocating up to maxCount of its buffers
-    virtual status_t      prepare2(int maxCount, int streamId);
+    virtual binder::Status prepare2(int32_t maxCount, int32_t streamId);
 
     /**
      * Interface used by CameraService
      */
 
     CameraDeviceClient(const sp<CameraService>& cameraService,
-            const sp<ICameraDeviceCallbacks>& remoteCallback,
+            const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
             const String16& clientPackageName,
             int cameraId,
             int cameraFacing,
@@ -142,10 +156,11 @@
      */
 
     virtual void notifyIdle();
-    virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+    virtual void notifyError(int32_t errorCode,
                              const CaptureResultExtras& resultExtras);
     virtual void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp);
     virtual void notifyPrepared(int streamId);
+    virtual void notifyRepeatingRequestError(long lastFrameNumber);
 
     /**
      * Interface used by independent components of CameraDeviceClient.
@@ -167,6 +182,7 @@
     static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
 
     /** Utility members */
+    binder::Status checkPidStatus(const char* checkLocation);
     bool enforceRequestPermissions(CameraMetadata& metadata);
 
     // Find the square of the euclidean distance between two points
@@ -190,8 +206,10 @@
         int32_t id;
     } mInputStream;
 
-    // Request ID
-    Vector<int> mStreamingRequestList;
+    // Streaming request ID
+    int32_t mStreamingRequestId;
+    Mutex mStreamingRequestIdLock;
+    static const int32_t REQUEST_ID_NONE = -1;
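+    // Only one repeating request may be active at a time, so a single
+    // lock-protected ID (REQUEST_ID_NONE when idle) replaces the old Vector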
 
     int32_t mRequestIdCounter;
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index c7de56a..c0d6da6 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -31,7 +31,7 @@
 
 #include "api2/CameraDeviceClient.h"
 
-#include "CameraDeviceFactory.h"
+#include "device3/Camera3Device.h"
 
 namespace android {
 using namespace camera2;
@@ -62,7 +62,7 @@
             String8(clientPackageName).string(), clientPid, clientUid);
 
     mInitialClientPid = clientPid;
-    mDevice = CameraDeviceFactory::createDevice(cameraId);
+    mDevice = new Camera3Device(cameraId);
     LOG_ALWAYS_FATAL_IF(mDevice == 0, "Device should never be NULL here.");
 }
 
@@ -169,14 +169,15 @@
 
 
 template <typename TClientBase>
-void Camera2ClientBase<TClientBase>::disconnect() {
+binder::Status Camera2ClientBase<TClientBase>::disconnect() {
     ATRACE_CALL();
     Mutex::Autolock icl(mBinderSerializationLock);
 
+    binder::Status res = binder::Status::ok();
     // Allow both client and the media server to disconnect at all times
     int callingPid = getCallingPid();
     if (callingPid != TClientBase::mClientPid &&
-        callingPid != TClientBase::mServicePid) return;
+        callingPid != TClientBase::mServicePid) return res;
 
     ALOGV("Camera %d: Shutting down", TClientBase::mCameraId);
 
@@ -185,6 +186,8 @@
     CameraService::BasicClient::disconnect();
 
     ALOGV("Camera %d: Shut down complete complete", TClientBase::mCameraId);
+
+    return res;
 }
 
 template <typename TClientBase>
@@ -228,7 +231,7 @@
 
 template <typename TClientBase>
 void Camera2ClientBase<TClientBase>::notifyError(
-        ICameraDeviceCallbacks::CameraErrorCode errorCode,
+        int32_t errorCode,
         const CaptureResultExtras& resultExtras) {
     ALOGE("Error condition %d reported by HAL, requestId %" PRId32, errorCode,
           resultExtras.requestId);
@@ -303,6 +306,14 @@
 }
 
 template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyRepeatingRequestError(long lastFrameNumber) {
+    (void)lastFrameNumber;
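+    // lastFrameNumber is read only by ALOGV below, which may be compiled out,
+    // so the cast avoids an unused-parameter warning in that configuration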
+
+    ALOGV("%s: Repeating request was stopped. Last frame number is %ld",
+            __FUNCTION__, lastFrameNumber);
+}
+
+template <typename TClientBase>
 int Camera2ClientBase<TClientBase>::getCameraId() const {
     return TClientBase::mCameraId;
 }
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 4568af0..4f60034 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -38,8 +38,8 @@
     /**
      * Base binder interface (see ICamera/ICameraDeviceUser for details)
      */
-    virtual status_t      connect(const sp<TCamCallbacks>& callbacks);
-    virtual void          disconnect();
+    virtual status_t       connect(const sp<TCamCallbacks>& callbacks);
+    virtual binder::Status disconnect();
 
     /**
      * Interface used by CameraService
@@ -63,7 +63,7 @@
      * CameraDeviceBase::NotificationListener implementation
      */
 
-    virtual void          notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+    virtual void          notifyError(int32_t errorCode,
                                       const CaptureResultExtras& resultExtras);
     virtual void          notifyIdle();
     virtual void          notifyShutter(const CaptureResultExtras& resultExtras,
@@ -73,6 +73,7 @@
     virtual void          notifyAutoWhitebalance(uint8_t newState,
                                                  int triggerId);
     virtual void          notifyPrepared(int streamId);
+    virtual void          notifyRepeatingRequestError(long lastFrameNumber);
 
     int                   getCameraId() const;
     const sp<CameraDeviceBase>&
@@ -125,7 +126,7 @@
     // that mBinderSerializationLock is locked when they're called
     mutable Mutex         mBinderSerializationLock;
 
-    /** CameraDeviceBase instance wrapping HAL2+ entry */
+    /** CameraDeviceBase instance wrapping HAL3+ entry */
 
     const int mDeviceVersion;
     sp<CameraDeviceBase>  mDevice;
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 7b083a3..35ec531 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -24,13 +24,14 @@
 #include <utils/Timers.h>
 #include <utils/List.h>
 
-#include <camera/camera2/ICameraDeviceCallbacks.h>
 #include "hardware/camera2.h"
 #include "hardware/camera3.h"
 #include "camera/CameraMetadata.h"
 #include "camera/CaptureResult.h"
 #include "common/CameraModule.h"
 #include "gui/IGraphicBufferProducer.h"
+#include "device3/Camera3StreamInterface.h"
+#include "binder/Status.h"
 
 namespace android {
 
@@ -108,7 +109,8 @@
      */
     virtual status_t createStream(sp<Surface> consumer,
             uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id) = 0;
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID) = 0;
 
     /**
      * Create an input stream of width, height, and format.
@@ -193,7 +195,7 @@
         // API1 and API2.
 
         // Required for API 1 and 2
-        virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+        virtual void notifyError(int32_t errorCode,
                                  const CaptureResultExtras &resultExtras) = 0;
 
         // Required only for API2
@@ -207,6 +209,7 @@
         virtual void notifyAutoExposure(uint8_t newState, int triggerId) = 0;
         virtual void notifyAutoWhitebalance(uint8_t newState,
                 int triggerId) = 0;
+        virtual void notifyRepeatingRequestError(long lastFrameNumber) = 0;
       protected:
         virtual ~NotificationListener();
     };
@@ -294,6 +297,12 @@
     virtual status_t tearDown(int streamId) = 0;
 
     /**
+     * Add buffer listener for a particular stream in the device.
+     */
+    virtual status_t addBufferListenerForStream(int streamId,
+            wp<camera3::Camera3StreamBufferListener> listener) = 0;
+
+    /**
      * Prepare stream by preallocating up to maxCount buffers for it asynchronously.
      * Calls notifyPrepared() once allocation is complete.
      */
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
index 16b8aba..073144c 100644
--- a/services/camera/libcameraservice/common/CameraModule.cpp
+++ b/services/camera/libcameraservice/common/CameraModule.cpp
@@ -27,15 +27,12 @@
 void CameraModule::deriveCameraCharacteristicsKeys(
         uint32_t deviceVersion, CameraMetadata &chars) {
     ATRACE_CALL();
-    // HAL1 devices should not reach here
-    if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
-        ALOGV("%s: Cannot derive keys for HAL version < 2.0");
-        return;
-    }
 
+    Vector<int32_t> derivedCharKeys;
+    Vector<int32_t> derivedRequestKeys;
+    Vector<int32_t> derivedResultKeys;
     // Keys added in HAL3.3
     if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_3) {
-        const size_t NUM_DERIVED_KEYS_HAL3_3 = 5;
         Vector<uint8_t> controlModes;
         uint8_t data = ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE;
         chars.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE, &data, /*count*/1);
@@ -107,18 +104,11 @@
         chars.update(ANDROID_SHADING_AVAILABLE_MODES, lscModes);
         chars.update(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, lscMapModes);
 
-        entry = chars.find(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
-        Vector<int32_t> availableCharsKeys;
-        availableCharsKeys.setCapacity(entry.count + NUM_DERIVED_KEYS_HAL3_3);
-        for (size_t i = 0; i < entry.count; i++) {
-            availableCharsKeys.push(entry.data.i32[i]);
-        }
-        availableCharsKeys.push(ANDROID_CONTROL_AE_LOCK_AVAILABLE);
-        availableCharsKeys.push(ANDROID_CONTROL_AWB_LOCK_AVAILABLE);
-        availableCharsKeys.push(ANDROID_CONTROL_AVAILABLE_MODES);
-        availableCharsKeys.push(ANDROID_SHADING_AVAILABLE_MODES);
-        availableCharsKeys.push(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES);
-        chars.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, availableCharsKeys);
+        derivedCharKeys.push(ANDROID_CONTROL_AE_LOCK_AVAILABLE);
+        derivedCharKeys.push(ANDROID_CONTROL_AWB_LOCK_AVAILABLE);
+        derivedCharKeys.push(ANDROID_CONTROL_AVAILABLE_MODES);
+        derivedCharKeys.push(ANDROID_SHADING_AVAILABLE_MODES);
+        derivedCharKeys.push(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES);
 
         // Need to update android.control.availableHighSpeedVideoConfigurations since HAL3.3
         // adds batch size to this array.
@@ -137,6 +127,68 @@
         }
     }
 
+    // Keys added in HAL3.4
+    if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_4) {
+        // Check if HAL supports RAW_OPAQUE output
+        camera_metadata_entry entry = chars.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+        bool supportRawOpaque = false;
+        bool supportAnyRaw = false;
+        const int STREAM_CONFIGURATION_SIZE = 4;
+        const int STREAM_FORMAT_OFFSET = 0;
+        const int STREAM_WIDTH_OFFSET = 1;
+        const int STREAM_HEIGHT_OFFSET = 2;
+        const int STREAM_IS_INPUT_OFFSET = 3;
+        Vector<int32_t> rawOpaqueSizes;
+
+        for (size_t i = 0; i < entry.count; i += STREAM_CONFIGURATION_SIZE) {
+            int32_t format = entry.data.i32[i + STREAM_FORMAT_OFFSET];
+            int32_t width = entry.data.i32[i + STREAM_WIDTH_OFFSET];
+            int32_t height = entry.data.i32[i + STREAM_HEIGHT_OFFSET];
+            int32_t isInput = entry.data.i32[i + STREAM_IS_INPUT_OFFSET];
+            if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+                    format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
+                supportRawOpaque = true;
+                rawOpaqueSizes.push(width);
+                rawOpaqueSizes.push(height);
+                // 2 bytes per pixel. This rough estimate is only used when
+                // the HAL does not fill in the opaque raw size
+                rawOpaqueSizes.push(width * height * 2);
+            }
+            if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+                    (format == HAL_PIXEL_FORMAT_RAW16 ||
+                     format == HAL_PIXEL_FORMAT_RAW10 ||
+                     format == HAL_PIXEL_FORMAT_RAW12 ||
+                     format == HAL_PIXEL_FORMAT_RAW_OPAQUE)) {
+                supportAnyRaw = true;
+            }
+        }
+
+        if (supportRawOpaque) {
+            entry = chars.find(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
+            if (entry.count == 0) {
+                // Fill in estimated value if HAL does not list it
+                chars.update(ANDROID_SENSOR_OPAQUE_RAW_SIZE, rawOpaqueSizes);
+                derivedCharKeys.push(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
+            }
+        }
+
+        // Check if HAL supports any RAW output, if so, fill in postRawSensitivityBoost range
+        if (supportAnyRaw) {
+            int32_t defaultRange[2] = {100, 100};
+            entry = chars.find(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE);
+            if (entry.count == 0) {
+                // Fill in default value (100, 100)
+                chars.update(
+                        ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE,
+                        defaultRange, 2);
+                derivedCharKeys.push(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE);
+                // Actual request/results will be derived by camera device.
+                derivedRequestKeys.push(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST);
+                derivedResultKeys.push(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST);
+            }
+        }
+    }
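+
+    // For reference, each stream configuration entry above occupies four
+    // consecutive int32 values; a hypothetical two-entry table would be:
+    //
+    //     int32_t configs[] = {
+    //         // format,                   width, height, isInput
+    //         HAL_PIXEL_FORMAT_RAW_OPAQUE, 4000,  3000,
+    //             ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+    //         HAL_PIXEL_FORMAT_BLOB,       4000,  3000,
+    //             ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+    //     };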
+
     // Always add a default for the pre-correction active array if the vendor chooses to omit this
     camera_metadata_entry entry = chars.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
     if (entry.count == 0) {
@@ -144,11 +196,40 @@
         entry = chars.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
         preCorrectionArray.appendArray(entry.data.i32, entry.count);
         chars.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, preCorrectionArray);
+        derivedCharKeys.push(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
     }
 
+    // Add the newly derived keys to the available characteristics/request/
+    // result key lists. This has to be done at the end of this function.
+    if (derivedCharKeys.size() > 0) {
+        appendAvailableKeys(
+                chars, ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, derivedCharKeys);
+    }
+    if (derivedRequestKeys.size() > 0) {
+        appendAvailableKeys(
+                chars, ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, derivedRequestKeys);
+    }
+    if (derivedResultKeys.size() > 0) {
+        appendAvailableKeys(
+                chars, ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, derivedResultKeys);
+    }
     return;
 }
 
+void CameraModule::appendAvailableKeys(CameraMetadata &chars,
+        int32_t keyTag, const Vector<int32_t>& appendKeys) {
+    camera_metadata_entry entry = chars.find(keyTag);
+    Vector<int32_t> availableKeys;
+    availableKeys.setCapacity(entry.count + appendKeys.size());
+    for (size_t i = 0; i < entry.count; i++) {
+        availableKeys.push(entry.data.i32[i]);
+    }
+    for (size_t i = 0; i < appendKeys.size(); i++) {
+        availableKeys.push(appendKeys[i]);
+    }
+    chars.update(keyTag, availableKeys);
+}
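+
+// A minimal usage sketch, as called from deriveCameraCharacteristicsKeys
+// (`chars` is camera metadata already populated by the HAL):
+//
+//     Vector<int32_t> newKeys;
+//     newKeys.push(ANDROID_CONTROL_AE_LOCK_AVAILABLE);
+//     appendAvailableKeys(chars,
+//             ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, newKeys);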
+
 CameraModule::CameraModule(camera_module_t *module) {
     if (module == NULL) {
         ALOGE("%s: camera hardware module must not be null", __FUNCTION__);
@@ -196,6 +277,9 @@
         int ret;
         ATRACE_BEGIN("camera_module->get_camera_info");
         ret = mModule->get_camera_info(cameraId, info);
+        // Fill this in so that CameraService won't be confused by
+        // a possibly-zero device_version
+        info->device_version = CAMERA_DEVICE_API_VERSION_1_0;
         ATRACE_END();
         return ret;
     }
@@ -211,7 +295,7 @@
             return ret;
         }
         int deviceVersion = rawInfo.device_version;
-        if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
+        if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_0) {
             // static_camera_characteristics is invalid
             *info = rawInfo;
             return ret;
diff --git a/services/camera/libcameraservice/common/CameraModule.h b/services/camera/libcameraservice/common/CameraModule.h
index 36822c7..1a1c274 100644
--- a/services/camera/libcameraservice/common/CameraModule.h
+++ b/services/camera/libcameraservice/common/CameraModule.h
@@ -57,8 +57,10 @@
 private:
     // Derive camera characteristics keys defined after HAL device version
     static void deriveCameraCharacteristicsKeys(uint32_t deviceVersion, CameraMetadata &chars);
+    // Helper function to append available[request|result|chars]Keys
+    static void appendAvailableKeys(CameraMetadata &chars,
+            int32_t keyTag, const Vector<int32_t>& appendKeys);
     status_t filterOpenErrorCode(status_t err);
-
     camera_module_t *mModule;
     KeyedVector<int, camera_info> mCameraInfoMap;
     Mutex mCameraInfoLock;
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 7f14cd4..bce0762 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -73,10 +73,18 @@
 
 class CameraHardwareInterface : public virtual RefBase {
 public:
-    CameraHardwareInterface(const char *name)
+    CameraHardwareInterface(const char *name):
+            mDevice(nullptr),
+            mName(name),
+            mPreviewScalingMode(NOT_SET),
+            mPreviewTransform(NOT_SET),
+            mPreviewWidth(NOT_SET),
+            mPreviewHeight(NOT_SET),
+            mPreviewFormat(NOT_SET),
+            mPreviewUsage(0),
+            mPreviewSwapInterval(NOT_SET),
+            mPreviewCrop{NOT_SET, NOT_SET, NOT_SET, NOT_SET}
     {
-        mDevice = 0;
-        mName = name;
     }
 
     ~CameraHardwareInterface()
@@ -94,7 +102,9 @@
         ALOGI("Opening camera %s", mName.string());
         camera_info info;
         status_t res = module->getCameraInfo(atoi(mName.string()), &info);
-        if (res != OK) return res;
+        if (res != OK) {
+            return res;
+        }
 
         int rc = OK;
         if (module->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_3 &&
@@ -118,9 +128,16 @@
     status_t setPreviewWindow(const sp<ANativeWindow>& buf)
     {
         ALOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());
-
         if (mDevice->ops->set_preview_window) {
             mPreviewWindow = buf;
+            if (buf != nullptr) {
+                if (mPreviewScalingMode != NOT_SET) {
+                    setPreviewScalingMode(mPreviewScalingMode);
+                }
+                if (mPreviewTransform != NOT_SET) {
+                    setPreviewTransform(mPreviewTransform);
+                }
+            }
             mHalPreviewWindow.user = this;
             ALOGV("%s &mHalPreviewWindow %p mHalPreviewWindow.user %p", __FUNCTION__,
                     &mHalPreviewWindow, mHalPreviewWindow.user);
@@ -130,6 +147,27 @@
         return INVALID_OPERATION;
     }
 
+    status_t setPreviewScalingMode(int scalingMode)
+    {
+        int rc = OK;
+        mPreviewScalingMode = scalingMode;
+        if (mPreviewWindow != nullptr) {
+            rc = native_window_set_scaling_mode(mPreviewWindow.get(),
+                    scalingMode);
+        }
+        return rc;
+    }
+
+    status_t setPreviewTransform(int transform) {
+        int rc = OK;
+        mPreviewTransform = transform;
+        if (mPreviewWindow != nullptr) {
+            rc = native_window_set_buffers_transform(mPreviewWindow.get(),
+                    mPreviewTransform);
+        }
+        return rc;
+    }
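+
+    // With the cached values above, a caller may configure the preview before
+    // any window is attached; a hypothetical sequence (`window` is a
+    // preview ANativeWindow):
+    //
+    //     hw->setPreviewScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+    //     hw->setPreviewTransform(NATIVE_WINDOW_TRANSFORM_ROT_90);
+    //     hw->setPreviewWindow(window);  // replays both cached values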
+
     /** Set the notification and data callbacks */
     void setCallbacks(notify_callback notify_cb,
                       data_callback data_cb,
@@ -569,6 +607,8 @@
         return __this->mPreviewWindow.get();
     }
 #define anw(n) __to_anw(((struct camera_preview_window *)n)->user)
+#define hwi(n) reinterpret_cast<CameraHardwareInterface *>(\
+        ((struct camera_preview_window *)n)->user)
 
     static int __dequeue_buffer(struct preview_stream_ops* w,
                                 buffer_handle_t** buffer, int *stride)
@@ -617,6 +657,44 @@
     static int __set_buffer_count(struct preview_stream_ops* w, int count)
     {
         ANativeWindow *a = anw(w);
+
+        if (a != nullptr) {
+            // Workaround for b/27039775
+            // Previously, setting the buffer count would reset the buffer
+            // queue's flag that allows for all buffers to be dequeued on the
+            // producer side, instead of just the producer's declared max count,
+            // if no filled buffers have yet been queued by the producer.  This
+            // reset no longer happens, but some HALs depend on this behavior,
+            // so it needs to be maintained for HAL backwards compatibility.
+            // Simulate the prior behavior by disconnecting/reconnecting to the
+            // window and setting the values again.  This has the drawback of
+            // actually causing memory reallocation, which may not have happened
+            // in the past.
+            CameraHardwareInterface *hw = hwi(w);
+            native_window_api_disconnect(a, NATIVE_WINDOW_API_CAMERA);
+            native_window_api_connect(a, NATIVE_WINDOW_API_CAMERA);
+            if (hw->mPreviewScalingMode != NOT_SET) {
+                native_window_set_scaling_mode(a, hw->mPreviewScalingMode);
+            }
+            if (hw->mPreviewTransform != NOT_SET) {
+                native_window_set_buffers_transform(a, hw->mPreviewTransform);
+            }
+            if (hw->mPreviewWidth != NOT_SET) {
+                native_window_set_buffers_dimensions(a,
+                        hw->mPreviewWidth, hw->mPreviewHeight);
+                native_window_set_buffers_format(a, hw->mPreviewFormat);
+            }
+            if (hw->mPreviewUsage != 0) {
+                native_window_set_usage(a, hw->mPreviewUsage);
+            }
+            if (hw->mPreviewSwapInterval != NOT_SET) {
+                a->setSwapInterval(a, hw->mPreviewSwapInterval);
+            }
+            if (hw->mPreviewCrop.left != NOT_SET) {
+                native_window_set_crop(a, &(hw->mPreviewCrop));
+            }
+        }
+
         return native_window_set_buffer_count(a, count);
     }
 
@@ -625,7 +703,10 @@
     {
         int rc;
         ANativeWindow *a = anw(w);
-
+        CameraHardwareInterface *hw = hwi(w);
+        hw->mPreviewWidth = width;
+        hw->mPreviewHeight = height;
+        hw->mPreviewFormat = format;
         rc = native_window_set_buffers_dimensions(a, width, height);
         if (!rc) {
             rc = native_window_set_buffers_format(a, format);
@@ -637,12 +718,12 @@
                       int left, int top, int right, int bottom)
     {
         ANativeWindow *a = anw(w);
-        android_native_rect_t crop;
-        crop.left = left;
-        crop.top = top;
-        crop.right = right;
-        crop.bottom = bottom;
-        return native_window_set_crop(a, &crop);
+        CameraHardwareInterface *hw = hwi(w);
+        hw->mPreviewCrop.left = left;
+        hw->mPreviewCrop.top = top;
+        hw->mPreviewCrop.right = right;
+        hw->mPreviewCrop.bottom = bottom;
+        return native_window_set_crop(a, &(hw->mPreviewCrop));
     }
 
     static int __set_timestamp(struct preview_stream_ops *w,
@@ -654,12 +735,16 @@
     static int __set_usage(struct preview_stream_ops* w, int usage)
     {
         ANativeWindow *a = anw(w);
+        CameraHardwareInterface *hw = hwi(w);
+        hw->mPreviewUsage = usage;
         return native_window_set_usage(a, usage);
     }
 
     static int __set_swap_interval(struct preview_stream_ops *w, int interval)
     {
         ANativeWindow *a = anw(w);
+        CameraHardwareInterface *hw = hwi(w);
+        hw->mPreviewSwapInterval = interval;
         return a->setSwapInterval(a, interval);
     }
 
@@ -701,6 +786,17 @@
     data_callback           mDataCb;
     data_callback_timestamp mDataCbTimestamp;
     void *mCbUser;
+
+    // Cached values for preview stream parameters
+    static const int NOT_SET = -1;
+    int mPreviewScalingMode;
+    int mPreviewTransform;
+    int mPreviewWidth;
+    int mPreviewHeight;
+    int mPreviewFormat;
+    int mPreviewUsage;
+    int mPreviewSwapInterval;
+    android_native_rect_t mPreviewCrop;
 };
 
 };  // namespace android
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
deleted file mode 100644
index d74f976..0000000
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ /dev/null
@@ -1,1618 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Camera2-Device"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-//#define LOG_NNDEBUG 0  // Per-frame verbose logging
-
-#ifdef LOG_NNDEBUG
-#define ALOGVV(...) ALOGV(__VA_ARGS__)
-#else
-#define ALOGVV(...) ((void)0)
-#endif
-
-#include <inttypes.h>
-#include <utils/Log.h>
-#include <utils/Trace.h>
-#include <utils/Timers.h>
-#include "Camera2Device.h"
-#include "CameraService.h"
-
-namespace android {
-
-Camera2Device::Camera2Device(int id):
-        mId(id),
-        mHal2Device(NULL)
-{
-    ATRACE_CALL();
-    ALOGV("%s: Created device for camera %d", __FUNCTION__, id);
-}
-
-Camera2Device::~Camera2Device()
-{
-    ATRACE_CALL();
-    ALOGV("%s: Tearing down for camera id %d", __FUNCTION__, mId);
-    disconnect();
-}
-
-int Camera2Device::getId() const {
-    return mId;
-}
-
-status_t Camera2Device::initialize(CameraModule *module)
-{
-    ATRACE_CALL();
-    ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);
-    if (mHal2Device != NULL) {
-        ALOGE("%s: Already initialized!", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
-
-    status_t res;
-    char name[10];
-    snprintf(name, sizeof(name), "%d", mId);
-
-    camera2_device_t *device;
-
-    res = module->open(name, reinterpret_cast<hw_device_t**>(&device));
-
-    if (res != OK) {
-        ALOGE("%s: Could not open camera %d: %s (%d)", __FUNCTION__,
-                mId, strerror(-res), res);
-        return res;
-    }
-
-    if (device->common.version != CAMERA_DEVICE_API_VERSION_2_0) {
-        ALOGE("%s: Could not open camera %d: "
-                "Camera device is not version %x, reports %x instead",
-                __FUNCTION__, mId, CAMERA_DEVICE_API_VERSION_2_0,
-                device->common.version);
-        device->common.close(&device->common);
-        return BAD_VALUE;
-    }
-
-    camera_info info;
-    res = module->getCameraInfo(mId, &info);
-    if (res != OK ) return res;
-
-    if (info.device_version != device->common.version) {
-        ALOGE("%s: HAL reporting mismatched camera_info version (%x)"
-                " and device version (%x).", __FUNCTION__,
-                device->common.version, info.device_version);
-        device->common.close(&device->common);
-        return BAD_VALUE;
-    }
-
-    res = mRequestQueue.setConsumerDevice(device);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to connect request queue to device: %s (%d)",
-                __FUNCTION__, mId, strerror(-res), res);
-        device->common.close(&device->common);
-        return res;
-    }
-    res = mFrameQueue.setProducerDevice(device);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to connect frame queue to device: %s (%d)",
-                __FUNCTION__, mId, strerror(-res), res);
-        device->common.close(&device->common);
-        return res;
-    }
-
-    res = device->ops->set_notify_callback(device, notificationCallback,
-            NULL);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to initialize notification callback!",
-                __FUNCTION__, mId);
-        device->common.close(&device->common);
-        return res;
-    }
-
-    mDeviceInfo = info.static_camera_characteristics;
-    mHal2Device = device;
-    mDeviceVersion = device->common.version;
-
-    return OK;
-}
-
-status_t Camera2Device::disconnect() {
-    ATRACE_CALL();
-    status_t res = OK;
-    if (mHal2Device) {
-        ALOGV("%s: Closing device for camera %d", __FUNCTION__, mId);
-
-        int inProgressCount = mHal2Device->ops->get_in_progress_count(mHal2Device);
-        if (inProgressCount > 0) {
-            ALOGW("%s: Closing camera device %d with %d requests in flight!",
-                    __FUNCTION__, mId, inProgressCount);
-        }
-        mReprocessStreams.clear();
-        mStreams.clear();
-        res = mHal2Device->common.close(&mHal2Device->common);
-        if (res != OK) {
-            ALOGE("%s: Could not close camera %d: %s (%d)",
-                    __FUNCTION__,
-                    mId, strerror(-res), res);
-        }
-        mHal2Device = NULL;
-        ALOGV("%s: Shutdown complete", __FUNCTION__);
-    }
-    return res;
-}
-
-status_t Camera2Device::dump(int fd, const Vector<String16>& args) {
-    ATRACE_CALL();
-    String8 result;
-    int detailLevel = 0;
-    int n = args.size();
-    String16 detailOption("-d");
-    for (int i = 0; i + 1 < n; i++) {
-        if (args[i] == detailOption) {
-            String8 levelStr(args[i+1]);
-            detailLevel = atoi(levelStr.string());
-        }
-    }
-
-    result.appendFormat("  Camera2Device[%d] dump (detail level %d):\n",
-            mId, detailLevel);
-
-    if (detailLevel > 0) {
-        result = "    Request queue contents:\n";
-        write(fd, result.string(), result.size());
-        mRequestQueue.dump(fd, args);
-
-        result = "    Frame queue contents:\n";
-        write(fd, result.string(), result.size());
-        mFrameQueue.dump(fd, args);
-    }
-
-    result = "    Active streams:\n";
-    write(fd, result.string(), result.size());
-    for (StreamList::iterator s = mStreams.begin(); s != mStreams.end(); s++) {
-        (*s)->dump(fd, args);
-    }
-
-    result = "    HAL device dump:\n";
-    write(fd, result.string(), result.size());
-
-    status_t res;
-    res = mHal2Device->ops->dump(mHal2Device, fd);
-
-    return res;
-}
-
-const CameraMetadata& Camera2Device::info() const {
-    ALOGVV("%s: E", __FUNCTION__);
-
-    return mDeviceInfo;
-}
-
-status_t Camera2Device::capture(CameraMetadata &request, int64_t* /*lastFrameNumber*/) {
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-
-    mRequestQueue.enqueue(request.release());
-    return OK;
-}
-
-status_t Camera2Device::captureList(const List<const CameraMetadata> &requests,
-                                    int64_t* /*lastFrameNumber*/) {
-    ATRACE_CALL();
-    ALOGE("%s: Camera2Device burst capture not implemented", __FUNCTION__);
-    return INVALID_OPERATION;
-}
-
-status_t Camera2Device::setStreamingRequest(const CameraMetadata &request,
-                                            int64_t* /*lastFrameNumber*/) {
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-    CameraMetadata streamRequest(request);
-    return mRequestQueue.setStreamSlot(streamRequest.release());
-}
-
-status_t Camera2Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
-                                                int64_t* /*lastFrameNumber*/) {
-    ATRACE_CALL();
-    ALOGE("%s, Camera2Device streaming burst not implemented", __FUNCTION__);
-    return INVALID_OPERATION;
-}
-
-status_t Camera2Device::clearStreamingRequest(int64_t* /*lastFrameNumber*/) {
-    ATRACE_CALL();
-    return mRequestQueue.setStreamSlot(NULL);
-}
-
-status_t Camera2Device::waitUntilRequestReceived(int32_t requestId, nsecs_t timeout) {
-    ATRACE_CALL();
-    return mRequestQueue.waitForDequeue(requestId, timeout);
-}
-
-status_t Camera2Device::createStream(sp<Surface> consumer,
-        uint32_t width, uint32_t height, int format,
-        android_dataspace /*dataSpace*/, camera3_stream_rotation_t rotation, int *id) {
-    ATRACE_CALL();
-    status_t res;
-    ALOGV("%s: E", __FUNCTION__);
-
-    sp<StreamAdapter> stream = new StreamAdapter(mHal2Device);
-    size_t size = 0;
-    if (format == HAL_PIXEL_FORMAT_BLOB) {
-        size = getJpegBufferSize(width, height);
-    }
-    res = stream->connectToDevice(consumer, width, height, format, size);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to create stream (%d x %d, format %x):"
-                "%s (%d)",
-                __FUNCTION__, mId, width, height, format, strerror(-res), res);
-        return res;
-    }
-
-    *id = stream->getId();
-
-    mStreams.push_back(stream);
-    return OK;
-}
-
-ssize_t Camera2Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
-    // Always give the max jpeg buffer size regardless of the actual jpeg resolution.
-    camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
-    if (jpegBufMaxSize.count == 0) {
-        ALOGE("%s: Camera %d: Can't find maximum JPEG size in static metadata!", __FUNCTION__, mId);
-        return BAD_VALUE;
-    }
-
-    return jpegBufMaxSize.data.i32[0];
-}
-
-status_t Camera2Device::createReprocessStreamFromStream(int outputId, int *id) {
-    ATRACE_CALL();
-    status_t res;
-    ALOGV("%s: E", __FUNCTION__);
-
-    bool found = false;
-    StreamList::iterator streamI;
-    for (streamI = mStreams.begin();
-         streamI != mStreams.end(); streamI++) {
-        if ((*streamI)->getId() == outputId) {
-            found = true;
-            break;
-        }
-    }
-    if (!found) {
-        ALOGE("%s: Camera %d: Output stream %d doesn't exist; can't create "
-                "reprocess stream from it!", __FUNCTION__, mId, outputId);
-        return BAD_VALUE;
-    }
-
-    sp<ReprocessStreamAdapter> stream = new ReprocessStreamAdapter(mHal2Device);
-
-    res = stream->connectToDevice((*streamI));
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Unable to create reprocessing stream from "\
-                "stream %d: %s (%d)", __FUNCTION__, mId, outputId,
-                strerror(-res), res);
-        return res;
-    }
-
-    *id = stream->getId();
-
-    mReprocessStreams.push_back(stream);
-    return OK;
-}
-
-
-status_t Camera2Device::getStreamInfo(int id,
-        uint32_t *width, uint32_t *height,
-        uint32_t *format, android_dataspace *dataSpace) {
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-    bool found = false;
-    StreamList::iterator streamI;
-    for (streamI = mStreams.begin();
-         streamI != mStreams.end(); streamI++) {
-        if ((*streamI)->getId() == id) {
-            found = true;
-            break;
-        }
-    }
-    if (!found) {
-        ALOGE("%s: Camera %d: Stream %d does not exist",
-                __FUNCTION__, mId, id);
-        return BAD_VALUE;
-    }
-
-    if (width) *width = (*streamI)->getWidth();
-    if (height) *height = (*streamI)->getHeight();
-    if (format) *format = (*streamI)->getFormat();
-    if (dataSpace) *dataSpace = HAL_DATASPACE_UNKNOWN;
-
-    return OK;
-}
-
-status_t Camera2Device::setStreamTransform(int id,
-        int transform) {
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-    bool found = false;
-    StreamList::iterator streamI;
-    for (streamI = mStreams.begin();
-         streamI != mStreams.end(); streamI++) {
-        if ((*streamI)->getId() == id) {
-            found = true;
-            break;
-        }
-    }
-    if (!found) {
-        ALOGE("%s: Camera %d: Stream %d does not exist",
-                __FUNCTION__, mId, id);
-        return BAD_VALUE;
-    }
-
-    return (*streamI)->setTransform(transform);
-}
-
-status_t Camera2Device::deleteStream(int id) {
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-    bool found = false;
-    for (StreamList::iterator streamI = mStreams.begin();
-         streamI != mStreams.end(); streamI++) {
-        if ((*streamI)->getId() == id) {
-            status_t res = (*streamI)->release();
-            if (res != OK) {
-                ALOGE("%s: Unable to release stream %d from HAL device: "
-                        "%s (%d)", __FUNCTION__, id, strerror(-res), res);
-                return res;
-            }
-            mStreams.erase(streamI);
-            found = true;
-            break;
-        }
-    }
-    if (!found) {
-        ALOGE("%s: Camera %d: Unable to find stream %d to delete",
-                __FUNCTION__, mId, id);
-        return BAD_VALUE;
-    }
-    return OK;
-}
-
-status_t Camera2Device::deleteReprocessStream(int id) {
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-    bool found = false;
-    for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
-         streamI != mReprocessStreams.end(); streamI++) {
-        if ((*streamI)->getId() == id) {
-            status_t res = (*streamI)->release();
-            if (res != OK) {
-                ALOGE("%s: Unable to release reprocess stream %d from "
-                        "HAL device: %s (%d)", __FUNCTION__, id,
-                        strerror(-res), res);
-                return res;
-            }
-            mReprocessStreams.erase(streamI);
-            found = true;
-            break;
-        }
-    }
-    if (!found) {
-        ALOGE("%s: Camera %d: Unable to find stream %d to delete",
-                __FUNCTION__, mId, id);
-        return BAD_VALUE;
-    }
-    return OK;
-}
-
-status_t Camera2Device::configureStreams(bool isConstrainedHighSpeed) {
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-
-    /**
-     * HAL2 devices do not need to configure streams;
-     * streams are created on the fly.
-     */
-    ALOGW("%s: No-op for HAL2 devices", __FUNCTION__);
-
-    return OK;
-}
-
-
-status_t Camera2Device::createDefaultRequest(int templateId,
-        CameraMetadata *request) {
-    ATRACE_CALL();
-    status_t err;
-    ALOGV("%s: E", __FUNCTION__);
-    camera_metadata_t *rawRequest;
-    err = mHal2Device->ops->construct_default_request(
-        mHal2Device, templateId, &rawRequest);
-    request->acquire(rawRequest);
-    return err;
-}
-
-status_t Camera2Device::waitUntilDrained() {
-    ATRACE_CALL();
-    static const uint32_t kSleepTime = 50000; // 50 ms
-    static const uint32_t kMaxSleepTime = 10000000; // 10 s
-    ALOGV("%s: Camera %d: Starting wait", __FUNCTION__, mId);
-    if (mRequestQueue.getBufferCount() ==
-            CAMERA2_REQUEST_QUEUE_IS_BOTTOMLESS) return INVALID_OPERATION;
-
-    // TODO: Set up notifications from HAL, instead of sleeping here
-    uint32_t totalTime = 0;
-    while (mHal2Device->ops->get_in_progress_count(mHal2Device) > 0) {
-        usleep(kSleepTime);
-        totalTime += kSleepTime;
-        if (totalTime > kMaxSleepTime) {
-            ALOGE("%s: Waited %d us, %d requests still in flight", __FUNCTION__,
-                    totalTime, mHal2Device->ops->get_in_progress_count(mHal2Device));
-            return TIMED_OUT;
-        }
-    }
-    ALOGV("%s: Camera %d: HAL is idle", __FUNCTION__, mId);
-    return OK;
-}
-
-status_t Camera2Device::setNotifyCallback(NotificationListener *listener) {
-    ATRACE_CALL();
-    status_t res;
-    res = mHal2Device->ops->set_notify_callback(mHal2Device, notificationCallback,
-            reinterpret_cast<void*>(listener) );
-    if (res != OK) {
-        ALOGE("%s: Unable to set notification callback!", __FUNCTION__);
-    }
-    return res;
-}
-
-bool Camera2Device::willNotify3A() {
-    return true;
-}
-
-void Camera2Device::notificationCallback(int32_t msg_type,
-        int32_t ext1,
-        int32_t ext2,
-        int32_t ext3,
-        void *user) {
-    ATRACE_CALL();
-    NotificationListener *listener = reinterpret_cast<NotificationListener*>(user);
-    ALOGV("%s: Notification %d, arguments %d, %d, %d", __FUNCTION__, msg_type,
-            ext1, ext2, ext3);
-    if (listener != NULL) {
-        switch (msg_type) {
-            case CAMERA2_MSG_ERROR:
-                // TODO: This needs to be fixed. ext2 and ext3 need to be considered.
-                listener->notifyError(
-                        ((ext1 == CAMERA2_MSG_ERROR_DEVICE)
-                        || (ext1 == CAMERA2_MSG_ERROR_HARDWARE)) ?
-                                ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE :
-                                ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE,
-                        CaptureResultExtras());
-                break;
-            case CAMERA2_MSG_SHUTTER: {
-                // TODO: Only needed for camera2 API, which is unsupported
-                // by HAL2 directly.
-                // nsecs_t timestamp = (nsecs_t)ext2 | ((nsecs_t)(ext3) << 32 );
-                // listener->notifyShutter(requestId, timestamp);
-                break;
-            }
-            case CAMERA2_MSG_AUTOFOCUS:
-                listener->notifyAutoFocus(ext1, ext2);
-                break;
-            case CAMERA2_MSG_AUTOEXPOSURE:
-                listener->notifyAutoExposure(ext1, ext2);
-                break;
-            case CAMERA2_MSG_AUTOWB:
-                listener->notifyAutoWhitebalance(ext1, ext2);
-                break;
-            default:
-                ALOGE("%s: Unknown notification %d (arguments %d, %d, %d)!",
-                        __FUNCTION__, msg_type, ext1, ext2, ext3);
-        }
-    }
-}
-
-status_t Camera2Device::waitForNextFrame(nsecs_t timeout) {
-    return mFrameQueue.waitForBuffer(timeout);
-}
-
-status_t Camera2Device::getNextResult(CaptureResult *result) {
-    ATRACE_CALL();
-    ALOGV("%s: get CaptureResult", __FUNCTION__);
-    if (result == NULL) {
-        ALOGE("%s: result pointer is NULL", __FUNCTION__);
-        return BAD_VALUE;
-    }
-    status_t res;
-    camera_metadata_t *rawFrame;
-    res = mFrameQueue.dequeue(&rawFrame);
-    if (rawFrame == NULL) {
-        return NOT_ENOUGH_DATA;
-    } else if (res == OK) {
-        result->mMetadata.acquire(rawFrame);
-    }
-
-    return res;
-}
-
-status_t Camera2Device::triggerAutofocus(uint32_t id) {
-    ATRACE_CALL();
-    status_t res;
-    ALOGV("%s: Triggering autofocus, id %d", __FUNCTION__, id);
-    res = mHal2Device->ops->trigger_action(mHal2Device,
-            CAMERA2_TRIGGER_AUTOFOCUS, id, 0);
-    if (res != OK) {
-        ALOGE("%s: Error triggering autofocus (id %d)",
-                __FUNCTION__, id);
-    }
-    return res;
-}
-
-status_t Camera2Device::triggerCancelAutofocus(uint32_t id) {
-    ATRACE_CALL();
-    status_t res;
-    ALOGV("%s: Canceling autofocus, id %d", __FUNCTION__, id);
-    res = mHal2Device->ops->trigger_action(mHal2Device,
-            CAMERA2_TRIGGER_CANCEL_AUTOFOCUS, id, 0);
-    if (res != OK) {
-        ALOGE("%s: Error canceling autofocus (id %d)",
-                __FUNCTION__, id);
-    }
-    return res;
-}
-
-status_t Camera2Device::triggerPrecaptureMetering(uint32_t id) {
-    ATRACE_CALL();
-    status_t res;
-    ALOGV("%s: Triggering precapture metering, id %d", __FUNCTION__, id);
-    res = mHal2Device->ops->trigger_action(mHal2Device,
-            CAMERA2_TRIGGER_PRECAPTURE_METERING, id, 0);
-    if (res != OK) {
-        ALOGE("%s: Error triggering precapture metering (id %d)",
-                __FUNCTION__, id);
-    }
-    return res;
-}
-
-status_t Camera2Device::pushReprocessBuffer(int reprocessStreamId,
-        buffer_handle_t *buffer, wp<BufferReleasedListener> listener) {
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-    bool found = false;
-    status_t res = OK;
-    for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
-         streamI != mReprocessStreams.end(); streamI++) {
-        if ((*streamI)->getId() == reprocessStreamId) {
-            res = (*streamI)->pushIntoStream(buffer, listener);
-            if (res != OK) {
-                ALOGE("%s: Unable to push buffer to reprocess stream %d: %s (%d)",
-                        __FUNCTION__, reprocessStreamId, strerror(-res), res);
-                return res;
-            }
-            found = true;
-            break;
-        }
-    }
-    if (!found) {
-        ALOGE("%s: Camera %d: Unable to find reprocess stream %d",
-                __FUNCTION__, mId, reprocessStreamId);
-        res = BAD_VALUE;
-    }
-    return res;
-}
-
-status_t Camera2Device::flush(int64_t* /*lastFrameNumber*/) {
-    ATRACE_CALL();
-
-    mRequestQueue.clear();
-    return waitUntilDrained();
-}
-
-status_t Camera2Device::prepare(int streamId) {
-    ATRACE_CALL();
-    ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
-    return NO_INIT;
-}
-
-status_t Camera2Device::tearDown(int streamId) {
-    ATRACE_CALL();
-    ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
-    return NO_INIT;
-}
-
-status_t Camera2Device::prepare(int maxCount, int streamId) {
-    ATRACE_CALL();
-    ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
-    return NO_INIT;
-}
-
-uint32_t Camera2Device::getDeviceVersion() {
-    ATRACE_CALL();
-    return mDeviceVersion;
-}
-
-/**
- * Camera2Device::MetadataQueue
- */
-
-Camera2Device::MetadataQueue::MetadataQueue():
-            mHal2Device(NULL),
-            mFrameCount(0),
-            mLatestRequestId(0),
-            mCount(0),
-            mStreamSlotCount(0),
-            mSignalConsumer(true)
-{
-    ATRACE_CALL();
-    camera2_request_queue_src_ops::dequeue_request = consumer_dequeue;
-    camera2_request_queue_src_ops::request_count = consumer_buffer_count;
-    camera2_request_queue_src_ops::free_request = consumer_free;
-
-    camera2_frame_queue_dst_ops::dequeue_frame = producer_dequeue;
-    camera2_frame_queue_dst_ops::cancel_frame = producer_cancel;
-    camera2_frame_queue_dst_ops::enqueue_frame = producer_enqueue;
-}
-
-Camera2Device::MetadataQueue::~MetadataQueue() {
-    ATRACE_CALL();
-    clear();
-}
-
-// Connect to camera2 HAL as consumer (input requests/reprocessing)
-status_t Camera2Device::MetadataQueue::setConsumerDevice(camera2_device_t *d) {
-    ATRACE_CALL();
-    status_t res;
-    res = d->ops->set_request_queue_src_ops(d,
-            this);
-    if (res != OK) return res;
-    mHal2Device = d;
-    return OK;
-}
-
-status_t Camera2Device::MetadataQueue::setProducerDevice(camera2_device_t *d) {
-    ATRACE_CALL();
-    status_t res;
-    res = d->ops->set_frame_queue_dst_ops(d,
-            this);
-    return res;
-}
-
-// Real interfaces
-status_t Camera2Device::MetadataQueue::enqueue(camera_metadata_t *buf) {
-    ATRACE_CALL();
-    ALOGVV("%s: E", __FUNCTION__);
-    Mutex::Autolock l(mMutex);
-
-    mCount++;
-    mEntries.push_back(buf);
-
-    return signalConsumerLocked();
-}
-
-int Camera2Device::MetadataQueue::getBufferCount() {
-    ATRACE_CALL();
-    Mutex::Autolock l(mMutex);
-    if (mStreamSlotCount > 0) {
-        return CAMERA2_REQUEST_QUEUE_IS_BOTTOMLESS;
-    }
-    return mCount;
-}
-
-status_t Camera2Device::MetadataQueue::dequeue(camera_metadata_t **buf,
-        bool incrementCount)
-{
-    ATRACE_CALL();
-    ALOGVV("%s: E", __FUNCTION__);
-    status_t res;
-    Mutex::Autolock l(mMutex);
-
-    if (mCount == 0) {
-        if (mStreamSlotCount == 0) {
-            ALOGVV("%s: Empty", __FUNCTION__);
-            *buf = NULL;
-            mSignalConsumer = true;
-            return OK;
-        }
-        ALOGVV("%s: Streaming %d frames to queue", __FUNCTION__,
-              mStreamSlotCount);
-
-        for (List<camera_metadata_t*>::iterator slotEntry = mStreamSlot.begin();
-                slotEntry != mStreamSlot.end();
-                slotEntry++ ) {
-            size_t entries = get_camera_metadata_entry_count(*slotEntry);
-            size_t dataBytes = get_camera_metadata_data_count(*slotEntry);
-
-            camera_metadata_t *copy =
-                    allocate_camera_metadata(entries, dataBytes);
-            append_camera_metadata(copy, *slotEntry);
-            mEntries.push_back(copy);
-        }
-        mCount = mStreamSlotCount;
-    }
-    ALOGVV("MetadataQueue: deque (%d buffers)", mCount);
-    camera_metadata_t *b = *(mEntries.begin());
-    mEntries.erase(mEntries.begin());
-
-    if (incrementCount) {
-        ATRACE_INT("cam2_request", mFrameCount);
-        camera_metadata_entry_t frameCount;
-        res = find_camera_metadata_entry(b,
-                ANDROID_REQUEST_FRAME_COUNT,
-                &frameCount);
-        if (res != OK) {
-            ALOGE("%s: Unable to add frame count: %s (%d)",
-                    __FUNCTION__, strerror(-res), res);
-        } else {
-            *frameCount.data.i32 = mFrameCount;
-        }
-        mFrameCount++;
-    }
-
-    // Check for request ID, and if present, signal waiters.
-    camera_metadata_entry_t requestId;
-    res = find_camera_metadata_entry(b,
-            ANDROID_REQUEST_ID,
-            &requestId);
-    if (res == OK) {
-        mLatestRequestId = requestId.data.i32[0];
-        mNewRequestId.signal();
-    }
-
-    *buf = b;
-    mCount--;
-
-    return OK;
-}
-
-status_t Camera2Device::MetadataQueue::waitForBuffer(nsecs_t timeout)
-{
-    Mutex::Autolock l(mMutex);
-    status_t res;
-    while (mCount == 0) {
-        res = notEmpty.waitRelative(mMutex, timeout);
-        if (res != OK) return res;
-    }
-    return OK;
-}
-
-status_t Camera2Device::MetadataQueue::waitForDequeue(int32_t id,
-        nsecs_t timeout) {
-    Mutex::Autolock l(mMutex);
-    status_t res;
-    while (mLatestRequestId != id) {
-        nsecs_t startTime = systemTime();
-
-        res = mNewRequestId.waitRelative(mMutex, timeout);
-        if (res != OK) return res;
-
-        timeout -= (systemTime() - startTime);
-    }
-
-    return OK;
-}
-
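Editorially, waitForDequeue() above bounds the total wait across wakeups for other request IDs by subtracting the elapsed time from the relative timeout before re-arming the wait. A minimal standalone sketch of the same pattern, using std::condition_variable with an absolute deadline instead of a shrinking relative timeout (all names here are illustrative, not part of this change):

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    // Sketch: wait until latestId matches wanted, bounding the total wait
    // across spurious or unrelated wakeups, as waitForDequeue() does above.
    bool waitForId(std::mutex& m, std::condition_variable& cv,
                   const int32_t& latestId, int32_t wanted,
                   std::chrono::nanoseconds timeout) {
        std::unique_lock<std::mutex> lock(m);
        // An absolute deadline plays the role of the shrinking relative
        // timeout: wait_until re-checks the predicate after every wakeup.
        auto deadline = std::chrono::steady_clock::now() + timeout;
        return cv.wait_until(lock, deadline,
                             [&] { return latestId == wanted; });
    }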
-status_t Camera2Device::MetadataQueue::setStreamSlot(camera_metadata_t *buf)
-{
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-    Mutex::Autolock l(mMutex);
-    if (buf == NULL) {
-        freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
-        mStreamSlotCount = 0;
-        return OK;
-    }
-
-    if (mStreamSlotCount > 1) {
-        List<camera_metadata_t*>::iterator deleter = ++mStreamSlot.begin();
-        freeBuffers(deleter, mStreamSlot.end());
-        mStreamSlotCount = 1;
-    }
-    if (mStreamSlotCount == 1) {
-        free_camera_metadata( *(mStreamSlot.begin()) );
-        *(mStreamSlot.begin()) = buf;
-    } else {
-        mStreamSlot.push_front(buf);
-        mStreamSlotCount = 1;
-    }
-    return signalConsumerLocked();
-}
-
-status_t Camera2Device::MetadataQueue::setStreamSlot(
-        const List<camera_metadata_t*> &bufs)
-{
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-    Mutex::Autolock l(mMutex);
-
-    if (mStreamSlotCount > 0) {
-        freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
-    }
-    mStreamSlotCount = 0;
-    for (List<camera_metadata_t*>::const_iterator r = bufs.begin();
-         r != bufs.end(); r++) {
-        mStreamSlot.push_back(*r);
-        mStreamSlotCount++;
-    }
-    return signalConsumerLocked();
-}
-
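The stream slot set here is what gives HAL2 its repeating-request behavior: dequeue() above replays a fresh copy of every slot entry whenever the main queue runs dry. A reduced sketch of those semantics with standard containers (a hypothetical stand-in, not the driver code):

    #include <deque>
    #include <list>
    #include <string>

    // Sketch of the stream-slot replay semantics: when the main queue is
    // empty, copies of the repeating slot are re-enqueued, so a streaming
    // request keeps being issued without the client re-submitting it.
    struct RepeatingQueue {
        std::deque<std::string> entries;     // one-shot requests
        std::list<std::string> streamSlot;   // repeating request(s)

        bool dequeue(std::string* out) {
            if (entries.empty()) {
                if (streamSlot.empty()) return false;  // truly empty
                // Replay copies, mirroring the allocate+append per slot
                // buffer in MetadataQueue::dequeue() above.
                entries.insert(entries.end(),
                               streamSlot.begin(), streamSlot.end());
            }
            *out = entries.front();
            entries.pop_front();
            return true;
        }
    };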
-status_t Camera2Device::MetadataQueue::clear()
-{
-    ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
-
-    Mutex::Autolock l(mMutex);
-
-    // Clear streaming slot
-    freeBuffers(mStreamSlot.begin(), mStreamSlot.end());
-    mStreamSlotCount = 0;
-
-    // Clear request queue
-    freeBuffers(mEntries.begin(), mEntries.end());
-    mCount = 0;
-    return OK;
-}
-
-status_t Camera2Device::MetadataQueue::dump(int fd,
-        const Vector<String16>& /*args*/) {
-    ATRACE_CALL();
-    String8 result;
-    status_t notLocked;
-    notLocked = mMutex.tryLock();
-    if (notLocked) {
-        result.append("    (Unable to lock queue mutex)\n");
-    }
-    result.appendFormat("      Current frame number: %d\n", mFrameCount);
-    if (mStreamSlotCount == 0) {
-        result.append("      Stream slot: Empty\n");
-        write(fd, result.string(), result.size());
-    } else {
-        result.appendFormat("      Stream slot: %zu entries\n",
-                mStreamSlot.size());
-        write(fd, result.string(), result.size());
-        int i = 0;
-        for (List<camera_metadata_t*>::iterator r = mStreamSlot.begin();
-             r != mStreamSlot.end(); r++) {
-            result = String8::format("       Stream slot buffer %d:\n", i);
-            write(fd, result.string(), result.size());
-            dump_indented_camera_metadata(*r, fd, 2, 10);
-            i++;
-        }
-    }
-    if (mEntries.size() == 0) {
-        result = "      Main queue is empty\n";
-        write(fd, result.string(), result.size());
-    } else {
-        result = String8::format("      Main queue has %zu entries:\n",
-                mEntries.size());
-        int i = 0;
-        for (List<camera_metadata_t*>::iterator r = mEntries.begin();
-             r != mEntries.end(); r++) {
-            result = String8::format("       Queue entry %d:\n", i);
-            write(fd, result.string(), result.size());
-            dump_indented_camera_metadata(*r, fd, 2, 10);
-            i++;
-        }
-    }
-
-    if (notLocked == 0) {
-        mMutex.unlock();
-    }
-
-    return OK;
-}
-
-status_t Camera2Device::MetadataQueue::signalConsumerLocked() {
-    ATRACE_CALL();
-    status_t res = OK;
-    notEmpty.signal();
-    if (mSignalConsumer && mHal2Device != NULL) {
-        mSignalConsumer = false;
-
-        mMutex.unlock();
-        ALOGV("%s: Signaling consumer", __FUNCTION__);
-        res = mHal2Device->ops->notify_request_queue_not_empty(mHal2Device);
-        mMutex.lock();
-    }
-    return res;
-}
-
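signalConsumerLocked() drops mMutex around notify_request_queue_not_empty() because the HAL may re-enter the queue (for example via consumer_dequeue) from inside that callback; holding the lock across it could self-deadlock. The idiom in isolation, as a sketch with placeholder names rather than the actual HAL interface:

    #include <mutex>

    // Sketch of the unlock-notify-relock idiom: the callback may call back
    // into this object, so the lock must not be held across it.
    class Notifier {
      public:
        void signalLocked() {            // precondition: caller holds mMutex
            if (mPendingSignal) {
                mPendingSignal = false;
                mMutex.unlock();         // drop the lock across the callback
                notifyConsumer();        // may re-enter and take mMutex
                mMutex.lock();           // reacquire before returning
            }
        }
      private:
        void notifyConsumer() { /* device callback would run here */ }
        std::mutex mMutex;
        bool mPendingSignal = true;
    };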
-status_t Camera2Device::MetadataQueue::freeBuffers(
-        List<camera_metadata_t*>::iterator start,
-        List<camera_metadata_t*>::iterator end)
-{
-    ATRACE_CALL();
-    while (start != end) {
-        free_camera_metadata(*start);
-        start = mStreamSlot.erase(start);
-    }
-    return OK;
-}
-
-Camera2Device::MetadataQueue* Camera2Device::MetadataQueue::getInstance(
-        const camera2_request_queue_src_ops_t *q)
-{
-    const MetadataQueue* cmq = static_cast<const MetadataQueue*>(q);
-    return const_cast<MetadataQueue*>(cmq);
-}
-
-Camera2Device::MetadataQueue* Camera2Device::MetadataQueue::getInstance(
-        const camera2_frame_queue_dst_ops_t *q)
-{
-    const MetadataQueue* cmq = static_cast<const MetadataQueue*>(q);
-    return const_cast<MetadataQueue*>(cmq);
-}
-
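These getInstance() casts are safe only because MetadataQueue derives from both ops structs, so the ops pointer the HAL holds is also a pointer to the queue object. A self-contained sketch of that C-callback trampoline pattern (widget_ops and Widget are invented for illustration):

    #include <cstdio>

    struct widget_ops {                      // stand-in for a HAL ops struct
        int (*get_count)(const widget_ops*);
    };

    // Deriving from the ops struct lets static callbacks downcast the ops
    // pointer back to the full C++ object, as MetadataQueue does above.
    class Widget : public widget_ops {
      public:
        Widget() { widget_ops::get_count = get_count_cb; }
        const widget_ops* ops() { return this; }  // handed to the C side
      private:
        static int get_count_cb(const widget_ops* q) {
            // Safe: the only widget_ops ever handed out are Widgets.
            return static_cast<const Widget*>(q)->mCount;
        }
        int mCount = 3;
    };

    int main() {
        Widget w;
        const widget_ops* ops = w.ops();
        std::printf("%d\n", ops->get_count(ops));  // prints 3
        return 0;
    }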
-int Camera2Device::MetadataQueue::consumer_buffer_count(
-        const camera2_request_queue_src_ops_t *q)
-{
-    MetadataQueue *queue = getInstance(q);
-    return queue->getBufferCount();
-}
-
-int Camera2Device::MetadataQueue::consumer_dequeue(
-        const camera2_request_queue_src_ops_t *q,
-        camera_metadata_t **buffer)
-{
-    MetadataQueue *queue = getInstance(q);
-    return queue->dequeue(buffer, true);
-}
-
-int Camera2Device::MetadataQueue::consumer_free(
-        const camera2_request_queue_src_ops_t *q,
-        camera_metadata_t *old_buffer)
-{
-    ATRACE_CALL();
-    MetadataQueue *queue = getInstance(q);
-    (void)queue;
-    free_camera_metadata(old_buffer);
-    return OK;
-}
-
-int Camera2Device::MetadataQueue::producer_dequeue(
-        const camera2_frame_queue_dst_ops_t * /*q*/,
-        size_t entries, size_t bytes,
-        camera_metadata_t **buffer)
-{
-    ATRACE_CALL();
-    camera_metadata_t *new_buffer =
-            allocate_camera_metadata(entries, bytes);
-    if (new_buffer == NULL) return NO_MEMORY;
-    *buffer = new_buffer;
-    return OK;
-}
-
-int Camera2Device::MetadataQueue::producer_cancel(
-        const camera2_frame_queue_dst_ops_t * /*q*/,
-        camera_metadata_t *old_buffer)
-{
-    ATRACE_CALL();
-    free_camera_metadata(old_buffer);
-    return OK;
-}
-
-int Camera2Device::MetadataQueue::producer_enqueue(
-        const camera2_frame_queue_dst_ops_t *q,
-        camera_metadata_t *filled_buffer)
-{
-    MetadataQueue *queue = getInstance(q);
-    return queue->enqueue(filled_buffer);
-}
-
-/**
- * Camera2Device::StreamAdapter
- */
-
-#ifndef container_of
-#define container_of(ptr, type, member) \
-    (type *)((char*)(ptr) - offsetof(type, member))
-#endif
-
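container_of recovers a pointer to an enclosing struct from a pointer to one of its members; enqueue_buffer() and cancel_buffer() below use it to get back the ANativeWindowBuffer from the buffer_handle_t* the HAL returns. A standalone usage sketch (Handle and Buffer are invented types):

    #include <cassert>
    #include <cstddef>

    #define container_of(ptr, type, member) \
        ((type *)((char*)(ptr) - offsetof(type, member)))

    struct Handle { int id; };
    struct Buffer {
        int width;
        Handle* handle;   // the member whose address the callee holds
    };

    int main() {
        Handle h{42};
        Buffer b{640, &h};
        Handle** handlePtr = &b.handle;  // what the HAL hands back
        // Subtracting the member's offset recovers the enclosing Buffer.
        Buffer* back = container_of(handlePtr, Buffer, handle);
        assert(back == &b && back->width == 640);
        return 0;
    }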
-Camera2Device::StreamAdapter::StreamAdapter(camera2_device_t *d):
-        mState(RELEASED),
-        mHal2Device(d),
-        mId(-1),
-        mWidth(0), mHeight(0), mFormat(0), mSize(0), mUsage(0),
-        mMaxProducerBuffers(0), mMaxConsumerBuffers(0),
-        mTotalBuffers(0),
-        mFormatRequested(0),
-        mActiveBuffers(0),
-        mFrameCount(0),
-        mLastTimestamp(0)
-{
-    camera2_stream_ops::dequeue_buffer = dequeue_buffer;
-    camera2_stream_ops::enqueue_buffer = enqueue_buffer;
-    camera2_stream_ops::cancel_buffer = cancel_buffer;
-    camera2_stream_ops::set_crop = set_crop;
-}
-
-Camera2Device::StreamAdapter::~StreamAdapter() {
-    ATRACE_CALL();
-    if (mState != RELEASED) {
-        release();
-    }
-}
-
-status_t Camera2Device::StreamAdapter::connectToDevice(
-        sp<ANativeWindow> consumer,
-        uint32_t width, uint32_t height, int format, size_t size) {
-    ATRACE_CALL();
-    status_t res;
-    ALOGV("%s: E", __FUNCTION__);
-
-    if (mState != RELEASED) return INVALID_OPERATION;
-    if (consumer == NULL) {
-        ALOGE("%s: Null consumer passed to stream adapter", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    ALOGV("%s: New stream parameters %d x %d, format 0x%x, size %zu",
-            __FUNCTION__, width, height, format, size);
-
-    mConsumerInterface = consumer;
-    mWidth = width;
-    mHeight = height;
-    mSize = (format == HAL_PIXEL_FORMAT_BLOB) ? size : 0;
-    mFormatRequested = format;
-
-    // Allocate device-side stream interface
-
-    uint32_t id;
-    uint32_t formatActual;
-    uint32_t usage;
-    uint32_t maxBuffers = 2;
-    res = mHal2Device->ops->allocate_stream(mHal2Device,
-            mWidth, mHeight, mFormatRequested, getStreamOps(),
-            &id, &formatActual, &usage, &maxBuffers);
-    if (res != OK) {
-        ALOGE("%s: Device stream allocation failed: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return res;
-    }
-
-    ALOGV("%s: Allocated stream id %d, actual format 0x%x, "
-            "usage 0x%x, producer wants %d buffers", __FUNCTION__,
-            id, formatActual, usage, maxBuffers);
-
-    mId = id;
-    mFormat = formatActual;
-    mUsage = usage;
-    mMaxProducerBuffers = maxBuffers;
-
-    mState = ALLOCATED;
-
-    // Configure consumer-side ANativeWindow interface
-    res = native_window_api_connect(mConsumerInterface.get(),
-            NATIVE_WINDOW_API_CAMERA);
-    if (res != OK) {
-        ALOGE("%s: Unable to connect to native window for stream %d",
-                __FUNCTION__, mId);
-
-        return res;
-    }
-
-    mState = CONNECTED;
-
-    res = native_window_set_usage(mConsumerInterface.get(), mUsage);
-    if (res != OK) {
-        ALOGE("%s: Unable to configure usage %08x for stream %d",
-                __FUNCTION__, mUsage, mId);
-        return res;
-    }
-
-    res = native_window_set_scaling_mode(mConsumerInterface.get(),
-            NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-    if (res != OK) {
-        ALOGE("%s: Unable to configure stream scaling: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return res;
-    }
-
-    res = setTransform(0);
-    if (res != OK) {
-        return res;
-    }
-
-    if (mFormat == HAL_PIXEL_FORMAT_BLOB) {
-        res = native_window_set_buffers_dimensions(mConsumerInterface.get(),
-                mSize, 1);
-        if (res != OK) {
-            ALOGE("%s: Unable to configure compressed stream buffer dimensions"
-                    " %d x %d, size %zu for stream %d",
-                    __FUNCTION__, mWidth, mHeight, mSize, mId);
-            return res;
-        }
-    } else {
-        res = native_window_set_buffers_dimensions(mConsumerInterface.get(),
-                mWidth, mHeight);
-        if (res != OK) {
-            ALOGE("%s: Unable to configure stream buffer dimensions"
-                    " %d x %d for stream %d",
-                    __FUNCTION__, mWidth, mHeight, mId);
-            return res;
-        }
-    }
-
-    res = native_window_set_buffers_format(mConsumerInterface.get(), mFormat);
-    if (res != OK) {
-        ALOGE("%s: Unable to configure stream buffer format"
-                " %#x for stream %d",
-                __FUNCTION__, mFormat, mId);
-        return res;
-    }
-
-    int maxConsumerBuffers;
-    res = mConsumerInterface->query(mConsumerInterface.get(),
-            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
-    if (res != OK) {
-        ALOGE("%s: Unable to query consumer undequeued"
-                " buffer count for stream %d", __FUNCTION__, mId);
-        return res;
-    }
-    mMaxConsumerBuffers = maxConsumerBuffers;
-
-    ALOGV("%s: Consumer wants %d buffers", __FUNCTION__,
-            mMaxConsumerBuffers);
-
-    mTotalBuffers = mMaxConsumerBuffers + mMaxProducerBuffers;
-    mActiveBuffers = 0;
-    mFrameCount = 0;
-    mLastTimestamp = 0;
-
-    res = native_window_set_buffer_count(mConsumerInterface.get(),
-            mTotalBuffers);
-    if (res != OK) {
-        ALOGE("%s: Unable to set buffer count for stream %d",
-                __FUNCTION__, mId);
-        return res;
-    }
-
-    // Register allocated buffers with HAL device
-    buffer_handle_t *buffers = new buffer_handle_t[mTotalBuffers];
-    ANativeWindowBuffer **anwBuffers = new ANativeWindowBuffer*[mTotalBuffers];
-    uint32_t bufferIdx = 0;
-    for (; bufferIdx < mTotalBuffers; bufferIdx++) {
-        res = native_window_dequeue_buffer_and_wait(mConsumerInterface.get(),
-                &anwBuffers[bufferIdx]);
-        if (res != OK) {
-            ALOGE("%s: Unable to dequeue buffer %d for initial registration for "
-                    "stream %d", __FUNCTION__, bufferIdx, mId);
-            goto cleanUpBuffers;
-        }
-
-        buffers[bufferIdx] = anwBuffers[bufferIdx]->handle;
-        ALOGV("%s: Buffer %p allocated", __FUNCTION__, (void*)buffers[bufferIdx]);
-    }
-
-    ALOGV("%s: Registering %d buffers with camera HAL", __FUNCTION__, mTotalBuffers);
-    res = mHal2Device->ops->register_stream_buffers(mHal2Device,
-            mId,
-            mTotalBuffers,
-            buffers);
-    if (res != OK) {
-        ALOGE("%s: Unable to register buffers with HAL device for stream %d",
-                __FUNCTION__, mId);
-    } else {
-        mState = ACTIVE;
-    }
-
-cleanUpBuffers:
-    ALOGV("%s: Cleaning up %d buffers", __FUNCTION__, bufferIdx);
-    for (uint32_t i = 0; i < bufferIdx; i++) {
-        res = mConsumerInterface->cancelBuffer(mConsumerInterface.get(),
-                anwBuffers[i], -1);
-        if (res != OK) {
-            ALOGE("%s: Unable to cancel buffer %d after registration",
-                    __FUNCTION__, i);
-        }
-    }
-    delete[] anwBuffers;
-    delete[] buffers;
-
-    return res;
-}
-
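The registration loop above primes the HAL by dequeuing every buffer once (to learn its handle), registering all handles in a single call, and then cancelling the buffers back to the window whether or not registration succeeded. The shape of that prime/register/cancel dance, reduced to stand-in types (Window, Hal, and Buf are illustrative only):

    #include <vector>

    struct Buf { int handle; };
    struct Window {                    // stand-in for ANativeWindow
        std::vector<Buf> bufs{{1}, {2}, {3}};
        size_t next = 0;
        Buf* dequeue() { return next < bufs.size() ? &bufs[next++] : nullptr; }
        void cancel(Buf*) {}
    };
    struct Hal {                       // stand-in for the HAL device
        int registerBuffers(const std::vector<int>&) { return 0; }
    };

    // Dequeue every buffer once to learn its handle, register the handles
    // in one call, then cancel all dequeued buffers back to the window,
    // even on failure, mirroring the cleanUpBuffers path above.
    int registerAll(Window& w, Hal& hal, size_t totalBuffers) {
        std::vector<Buf*> dequeued;
        std::vector<int> handles;
        for (size_t i = 0; i < totalBuffers; i++) {
            Buf* b = w.dequeue();
            if (b == nullptr) break;   // fall through to cleanup
            dequeued.push_back(b);
            handles.push_back(b->handle);
        }
        int res = (dequeued.size() == totalBuffers)
                ? hal.registerBuffers(handles) : -1;
        for (Buf* b : dequeued) w.cancel(b);
        return res;
    }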
-status_t Camera2Device::StreamAdapter::release() {
-    ATRACE_CALL();
-    status_t res;
-    ALOGV("%s: Releasing stream %d (%d x %d, format %d)", __FUNCTION__, mId,
-            mWidth, mHeight, mFormat);
-    if (mState >= ALLOCATED) {
-        res = mHal2Device->ops->release_stream(mHal2Device, mId);
-        if (res != OK) {
-            ALOGE("%s: Unable to release stream %d",
-                    __FUNCTION__, mId);
-            return res;
-        }
-    }
-    if (mState >= CONNECTED) {
-        res = native_window_api_disconnect(mConsumerInterface.get(),
-                NATIVE_WINDOW_API_CAMERA);
-
-        /* This is not an error. If the calling client process dies, the
-           window dies with it and all calls to it return DEAD_OBJECT, so
-           it is already "disconnected". */
-        if (res == DEAD_OBJECT) {
-            ALOGW("%s: While disconnecting stream %d from native window, the"
-                  " native window died from under us", __FUNCTION__, mId);
-        }
-        else if (res != OK) {
-            ALOGE("%s: Unable to disconnect stream %d from native window (error %d %s)",
-                    __FUNCTION__, mId, res, strerror(-res));
-            return res;
-        }
-    }
-    mId = -1;
-    mState = RELEASED;
-    return OK;
-}
-
-status_t Camera2Device::StreamAdapter::setTransform(int transform) {
-    ATRACE_CALL();
-    status_t res;
-    if (mState < CONNECTED) {
-        ALOGE("%s: Cannot set transform on unconnected stream", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
-    res = native_window_set_buffers_transform(mConsumerInterface.get(),
-                                              transform);
-    if (res != OK) {
-        ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
-                __FUNCTION__, transform, strerror(-res), res);
-    }
-    return res;
-}
-
-status_t Camera2Device::StreamAdapter::dump(int fd,
-        const Vector<String16>& /*args*/) {
-    ATRACE_CALL();
-    String8 result = String8::format("      Stream %d: %d x %d, format 0x%x\n",
-            mId, mWidth, mHeight, mFormat);
-    result.appendFormat("        size %zu, usage 0x%x, requested format 0x%x\n",
-            mSize, mUsage, mFormatRequested);
-    result.appendFormat("        total buffers: %d, dequeued buffers: %d\n",
-            mTotalBuffers, mActiveBuffers);
-    result.appendFormat("        frame count: %d, last timestamp %" PRId64 "\n",
-            mFrameCount, mLastTimestamp);
-    write(fd, result.string(), result.size());
-    return OK;
-}
-
-const camera2_stream_ops *Camera2Device::StreamAdapter::getStreamOps() {
-    return static_cast<camera2_stream_ops *>(this);
-}
-
-ANativeWindow* Camera2Device::StreamAdapter::toANW(
-        const camera2_stream_ops_t *w) {
-    return static_cast<const StreamAdapter*>(w)->mConsumerInterface.get();
-}
-
-int Camera2Device::StreamAdapter::dequeue_buffer(const camera2_stream_ops_t *w,
-        buffer_handle_t** buffer) {
-    ATRACE_CALL();
-    int res;
-    StreamAdapter* stream =
-            const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
-    if (stream->mState != ACTIVE) {
-        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, stream->mState);
-        return INVALID_OPERATION;
-    }
-
-    ANativeWindow *a = toANW(w);
-    ANativeWindowBuffer* anb;
-    res = native_window_dequeue_buffer_and_wait(a, &anb);
-    if (res != OK) {
-        ALOGE("Stream %d dequeue: Error from native_window: %s (%d)", stream->mId,
-                strerror(-res), res);
-        return res;
-    }
-
-    *buffer = &(anb->handle);
-    stream->mActiveBuffers++;
-
-    ALOGVV("Stream %d dequeue: Buffer %p dequeued", stream->mId, (void*)(**buffer));
-    return res;
-}
-
-int Camera2Device::StreamAdapter::enqueue_buffer(const camera2_stream_ops_t* w,
-        int64_t timestamp,
-        buffer_handle_t* buffer) {
-    ATRACE_CALL();
-    StreamAdapter *stream =
-            const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
-    stream->mFrameCount++;
-    ALOGVV("Stream %d enqueue: Frame %d (%p) captured at %lld ns",
-            stream->mId, stream->mFrameCount, (void*)(*buffer), timestamp);
-    int state = stream->mState;
-    if (state != ACTIVE) {
-        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
-        return INVALID_OPERATION;
-    }
-    ANativeWindow *a = toANW(w);
-    status_t err;
-
-    err = native_window_set_buffers_timestamp(a, timestamp);
-    if (err != OK) {
-        ALOGE("%s: Error setting timestamp on native window: %s (%d)",
-                __FUNCTION__, strerror(-err), err);
-        return err;
-    }
-    err = a->queueBuffer(a,
-            container_of(buffer, ANativeWindowBuffer, handle), -1);
-    if (err != OK) {
-        ALOGE("%s: Error queueing buffer to native window: %s (%d)",
-                __FUNCTION__, strerror(-err), err);
-        return err;
-    }
-
-    stream->mActiveBuffers--;
-    stream->mLastTimestamp = timestamp;
-    return OK;
-}
-
-int Camera2Device::StreamAdapter::cancel_buffer(const camera2_stream_ops_t* w,
-        buffer_handle_t* buffer) {
-    ATRACE_CALL();
-    StreamAdapter *stream =
-            const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
-    ALOGVV("Stream %d cancel: Buffer %p",
-            stream->mId, (void*)(*buffer));
-    if (stream->mState != ACTIVE) {
-        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, stream->mState);
-        return INVALID_OPERATION;
-    }
-
-    ANativeWindow *a = toANW(w);
-    int err = a->cancelBuffer(a,
-            container_of(buffer, ANativeWindowBuffer, handle), -1);
-    if (err != OK) {
-        ALOGE("%s: Error canceling buffer to native window: %s (%d)",
-                __FUNCTION__, strerror(-err), err);
-        return err;
-    }
-
-    stream->mActiveBuffers--;
-    return OK;
-}
-
-int Camera2Device::StreamAdapter::set_crop(const camera2_stream_ops_t* w,
-        int left, int top, int right, int bottom) {
-    ATRACE_CALL();
-    int state = static_cast<const StreamAdapter*>(w)->mState;
-    if (state != ACTIVE) {
-        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
-        return INVALID_OPERATION;
-    }
-    ANativeWindow *a = toANW(w);
-    android_native_rect_t crop = { left, top, right, bottom };
-    return native_window_set_crop(a, &crop);
-}
-
-/**
- * Camera2Device::ReprocessStreamAdapter
- */
-
-#ifndef container_of
-#define container_of(ptr, type, member) \
-    (type *)((char*)(ptr) - offsetof(type, member))
-#endif
-
-Camera2Device::ReprocessStreamAdapter::ReprocessStreamAdapter(camera2_device_t *d):
-        mState(RELEASED),
-        mHal2Device(d),
-        mId(-1),
-        mWidth(0), mHeight(0), mFormat(0),
-        mActiveBuffers(0),
-        mFrameCount(0)
-{
-    ATRACE_CALL();
-    camera2_stream_in_ops::acquire_buffer = acquire_buffer;
-    camera2_stream_in_ops::release_buffer = release_buffer;
-}
-
-Camera2Device::ReprocessStreamAdapter::~ReprocessStreamAdapter() {
-    ATRACE_CALL();
-    if (mState != RELEASED) {
-        release();
-    }
-}
-
-status_t Camera2Device::ReprocessStreamAdapter::connectToDevice(
-        const sp<StreamAdapter> &outputStream) {
-    ATRACE_CALL();
-    status_t res;
-    ALOGV("%s: E", __FUNCTION__);
-
-    if (mState != RELEASED) return INVALID_OPERATION;
-    if (outputStream == NULL) {
-        ALOGE("%s: Null base stream passed to reprocess stream adapter",
-                __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    mBaseStream = outputStream;
-    mWidth = outputStream->getWidth();
-    mHeight = outputStream->getHeight();
-    mFormat = outputStream->getFormat();
-
-    ALOGV("%s: New reprocess stream parameters %d x %d, format 0x%x",
-            __FUNCTION__, mWidth, mHeight, mFormat);
-
-    // Allocate device-side stream interface
-
-    uint32_t id;
-    res = mHal2Device->ops->allocate_reprocess_stream_from_stream(mHal2Device,
-            outputStream->getId(), getStreamOps(),
-            &id);
-    if (res != OK) {
-        ALOGE("%s: Device reprocess stream allocation failed: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return res;
-    }
-
-    ALOGV("%s: Allocated reprocess stream id %d based on stream %d",
-            __FUNCTION__, id, outputStream->getId());
-
-    mId = id;
-
-    mState = ACTIVE;
-
-    return OK;
-}
-
-status_t Camera2Device::ReprocessStreamAdapter::release() {
-    ATRACE_CALL();
-    status_t res;
-    ALOGV("%s: Releasing stream %d", __FUNCTION__, mId);
-    if (mState >= ACTIVE) {
-        res = mHal2Device->ops->release_reprocess_stream(mHal2Device, mId);
-        if (res != OK) {
-            ALOGE("%s: Unable to release stream %d",
-                    __FUNCTION__, mId);
-            return res;
-        }
-    }
-
-    List<QueueEntry>::iterator s;
-    for (s = mQueue.begin(); s != mQueue.end(); s++) {
-        sp<BufferReleasedListener> listener = s->releaseListener.promote();
-        if (listener != 0) listener->onBufferReleased(s->handle);
-    }
-    for (s = mInFlightQueue.begin(); s != mInFlightQueue.end(); s++) {
-        sp<BufferReleasedListener> listener = s->releaseListener.promote();
-        if (listener != 0) listener->onBufferReleased(s->handle);
-    }
-    mQueue.clear();
-    mInFlightQueue.clear();
-
-    mState = RELEASED;
-    return OK;
-}
-
-status_t Camera2Device::ReprocessStreamAdapter::pushIntoStream(
-    buffer_handle_t *handle, const wp<BufferReleasedListener> &releaseListener) {
-    ATRACE_CALL();
-    // TODO: Some error checking here would be nice
-    ALOGV("%s: Pushing buffer %p to stream", __FUNCTION__, (void*)(*handle));
-
-    QueueEntry entry;
-    entry.handle = handle;
-    entry.releaseListener = releaseListener;
-    mQueue.push_back(entry);
-    return OK;
-}
-
-status_t Camera2Device::ReprocessStreamAdapter::dump(int fd,
-        const Vector<String16>& /*args*/) {
-    ATRACE_CALL();
-    String8 result =
-            String8::format("      Reprocess stream %d: %d x %d, fmt 0x%x\n",
-                    mId, mWidth, mHeight, mFormat);
-    result.appendFormat("        acquired buffers: %d\n",
-            mActiveBuffers);
-    result.appendFormat("        frame count: %d\n",
-            mFrameCount);
-    write(fd, result.string(), result.size());
-    return OK;
-}
-
-const camera2_stream_in_ops *Camera2Device::ReprocessStreamAdapter::getStreamOps() {
-    return static_cast<camera2_stream_in_ops *>(this);
-}
-
-int Camera2Device::ReprocessStreamAdapter::acquire_buffer(
-    const camera2_stream_in_ops_t *w,
-        buffer_handle_t** buffer) {
-    ATRACE_CALL();
-
-    ReprocessStreamAdapter* stream =
-            const_cast<ReprocessStreamAdapter*>(
-                static_cast<const ReprocessStreamAdapter*>(w));
-    if (stream->mState != ACTIVE) {
-        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, stream->mState);
-        return INVALID_OPERATION;
-    }
-
-    if (stream->mQueue.empty()) {
-        *buffer = NULL;
-        return OK;
-    }
-
-    QueueEntry &entry = *(stream->mQueue.begin());
-
-    *buffer = entry.handle;
-
-    stream->mInFlightQueue.push_back(entry);
-    stream->mQueue.erase(stream->mQueue.begin());
-
-    stream->mActiveBuffers++;
-
-    ALOGV("Stream %d acquire: Buffer %p acquired", stream->mId,
-            (void*)(**buffer));
-    return OK;
-}
-
-int Camera2Device::ReprocessStreamAdapter::release_buffer(
-    const camera2_stream_in_ops_t* w,
-    buffer_handle_t* buffer) {
-    ATRACE_CALL();
-    ReprocessStreamAdapter *stream =
-            const_cast<ReprocessStreamAdapter*>(
-                static_cast<const ReprocessStreamAdapter*>(w) );
-    stream->mFrameCount++;
-    ALOGV("Reprocess stream %d release: Frame %d (%p)",
-            stream->mId, stream->mFrameCount, (void*)*buffer);
-    int state = stream->mState;
-    if (state != ACTIVE) {
-        ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
-        return INVALID_OPERATION;
-    }
-    stream->mActiveBuffers--;
-
-    List<QueueEntry>::iterator s;
-    for (s = stream->mInFlightQueue.begin(); s != stream->mInFlightQueue.end(); s++) {
-        if ( s->handle == buffer ) break;
-    }
-    if (s == stream->mInFlightQueue.end()) {
-        ALOGE("%s: Can't find buffer %p in in-flight list!", __FUNCTION__,
-                buffer);
-        return INVALID_OPERATION;
-    }
-
-    sp<BufferReleasedListener> listener = s->releaseListener.promote();
-    if (listener != 0) {
-        listener->onBufferReleased(s->handle);
-    } else {
-        ALOGE("%s: Can't free buffer - missing listener", __FUNCTION__);
-    }
-    stream->mInFlightQueue.erase(s);
-
-    return OK;
-}
-
-// camera 2 devices don't support reprocessing
-status_t Camera2Device::createInputStream(
-    uint32_t width, uint32_t height, int format, int *id) {
-    ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__);
-    return INVALID_OPERATION;
-}
-
-// camera 2 devices don't support reprocessing
-status_t Camera2Device::getInputBufferProducer(
-        sp<IGraphicBufferProducer> *producer) {
-    ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__);
-    return INVALID_OPERATION;
-}
-
-}; // namespace android
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
deleted file mode 100644
index b4d343c..0000000
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2DEVICE_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2DEVICE_H
-
-#include <utils/Condition.h>
-#include <utils/Errors.h>
-#include <utils/List.h>
-#include <utils/Mutex.h>
-
-#include "common/CameraDeviceBase.h"
-
-namespace android {
-
-/**
- * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_2_0
- *
- * TODO for camera2 API implementation:
- * Does not produce notifyShutter / notifyIdle callbacks to NotificationListener;
- * use waitUntilDrained for idle.
- */
-class Camera2Device: public CameraDeviceBase {
-  public:
-    Camera2Device(int id);
-
-    virtual ~Camera2Device();
-
-    /**
-     * CameraDevice interface
-     */
-    virtual int      getId() const;
-    virtual status_t initialize(CameraModule *module);
-    virtual status_t disconnect();
-    virtual status_t dump(int fd, const Vector<String16>& args);
-    virtual const CameraMetadata& info() const;
-    virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL);
-    virtual status_t captureList(const List<const CameraMetadata> &requests,
-                                 int64_t *lastFrameNumber = NULL);
-    virtual status_t setStreamingRequest(const CameraMetadata &request,
-                                         int64_t *lastFrameNumber = NULL);
-    virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
-                                             int64_t *lastFrameNumber = NULL);
-    virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL);
-    virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
-    virtual status_t createStream(sp<Surface> consumer,
-            uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id);
-    virtual status_t createInputStream(
-            uint32_t width, uint32_t height, int format, int *id);
-    virtual status_t createReprocessStreamFromStream(int outputId, int *id);
-    virtual status_t getStreamInfo(int id,
-            uint32_t *width, uint32_t *height,
-            uint32_t *format, android_dataspace *dataSpace);
-    virtual status_t setStreamTransform(int id, int transform);
-    virtual status_t deleteStream(int id);
-    virtual status_t deleteReprocessStream(int id);
-    // No-op on HAL2 devices
-    virtual status_t configureStreams(bool isConstrainedHighSpeed = false);
-    virtual status_t getInputBufferProducer(
-            sp<IGraphicBufferProducer> *producer);
-    virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
-    virtual status_t waitUntilDrained();
-    virtual status_t setNotifyCallback(NotificationListener *listener);
-    virtual bool     willNotify3A();
-    virtual status_t waitForNextFrame(nsecs_t timeout);
-    virtual status_t getNextResult(CaptureResult *frame);
-    virtual status_t triggerAutofocus(uint32_t id);
-    virtual status_t triggerCancelAutofocus(uint32_t id);
-    virtual status_t triggerPrecaptureMetering(uint32_t id);
-    virtual status_t pushReprocessBuffer(int reprocessStreamId,
-            buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
-    // Flush implemented as just a wait
-    virtual status_t flush(int64_t *lastFrameNumber = NULL);
-    // Prepare and tearDown are no-ops
-    virtual status_t prepare(int streamId);
-    virtual status_t tearDown(int streamId);
-    virtual status_t prepare(int maxCount, int streamId);
-
-    virtual uint32_t getDeviceVersion();
-    virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
-
-  private:
-    const int mId;
-    camera2_device_t *mHal2Device;
-
-    CameraMetadata mDeviceInfo;
-
-    uint32_t mDeviceVersion;
-
-    /**
-     * Queue class for both sending requests to a camera2 device, and for
-     * receiving frames from a camera2 device.
-     */
-    class MetadataQueue: public camera2_request_queue_src_ops_t,
-                         public camera2_frame_queue_dst_ops_t {
-      public:
-        MetadataQueue();
-        ~MetadataQueue();
-
-        // Interface to camera2 HAL device, either for requests (device is
-        // consumer) or for frames (device is producer)
-        const camera2_request_queue_src_ops_t*   getToConsumerInterface();
-        void setFromConsumerInterface(camera2_device_t *d);
-
-        // Connect queue consumer endpoint to a camera2 device
-        status_t setConsumerDevice(camera2_device_t *d);
-        // Connect queue producer endpoint to a camera2 device
-        status_t setProducerDevice(camera2_device_t *d);
-
-        const camera2_frame_queue_dst_ops_t* getToProducerInterface();
-
-        // Real interfaces. On enqueue, queue takes ownership of buffer pointer
-        // On dequeue, user takes ownership of buffer pointer.
-        status_t enqueue(camera_metadata_t *buf);
-        status_t dequeue(camera_metadata_t **buf, bool incrementCount = false);
-        int      getBufferCount();
-        status_t waitForBuffer(nsecs_t timeout);
-        // Wait until a buffer with the given ID is dequeued. Will return
-        // immediately if the latest buffer dequeued has that ID.
-        status_t waitForDequeue(int32_t id, nsecs_t timeout);
-
-        // Set repeating buffer(s); if the queue is empty on a dequeue call, the
-        // queue copies the contents of the stream slot into the queue, and then
-        // dequeues the first new entry. The methods take ownership of the
-        // metadata buffers passed in.
-        status_t setStreamSlot(camera_metadata_t *buf);
-        status_t setStreamSlot(const List<camera_metadata_t*> &bufs);
-
-        // Clear the request queue and the streaming slot
-        status_t clear();
-
-        status_t dump(int fd, const Vector<String16>& args);
-
-      private:
-        status_t signalConsumerLocked();
-        status_t freeBuffers(List<camera_metadata_t*>::iterator start,
-                List<camera_metadata_t*>::iterator end);
-
-        camera2_device_t *mHal2Device;
-
-        Mutex mMutex;
-        Condition notEmpty;
-
-        int mFrameCount;
-        int32_t mLatestRequestId;
-        Condition mNewRequestId;
-
-        int mCount;
-        List<camera_metadata_t*> mEntries;
-        int mStreamSlotCount;
-        List<camera_metadata_t*> mStreamSlot;
-
-        bool mSignalConsumer;
-
-        static MetadataQueue* getInstance(
-            const camera2_frame_queue_dst_ops_t *q);
-        static MetadataQueue* getInstance(
-            const camera2_request_queue_src_ops_t *q);
-
-        static int consumer_buffer_count(
-            const camera2_request_queue_src_ops_t *q);
-
-        static int consumer_dequeue(const camera2_request_queue_src_ops_t *q,
-            camera_metadata_t **buffer);
-
-        static int consumer_free(const camera2_request_queue_src_ops_t *q,
-                camera_metadata_t *old_buffer);
-
-        static int producer_dequeue(const camera2_frame_queue_dst_ops_t *q,
-                size_t entries, size_t bytes,
-                camera_metadata_t **buffer);
-
-        static int producer_cancel(const camera2_frame_queue_dst_ops_t *q,
-            camera_metadata_t *old_buffer);
-
-        static int producer_enqueue(const camera2_frame_queue_dst_ops_t *q,
-                camera_metadata_t *filled_buffer);
-
-    }; // class MetadataQueue
-
-    MetadataQueue mRequestQueue;
-    MetadataQueue mFrameQueue;
-
-    /**
-     * Adapter from an ANativeWindow interface to camera2 device stream ops.
-     * Also takes care of allocating/deallocating stream in device interface
-     */
-    class StreamAdapter: public camera2_stream_ops, public virtual RefBase {
-      public:
-        StreamAdapter(camera2_device_t *d);
-
-        ~StreamAdapter();
-
-        /**
-         * Create a HAL device stream of the requested size and format.
-         *
-         * If format is CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, then the HAL device
-         * selects an appropriate format; it can be queried with getFormat.
-         *
-         * If format is HAL_PIXEL_FORMAT_COMPRESSED, the size parameter must
-         * be equal to the size in bytes of the buffers to allocate for the
-         * stream. For other formats, the size parameter is ignored.
-         */
-        status_t connectToDevice(sp<ANativeWindow> consumer,
-                uint32_t width, uint32_t height, int format, size_t size);
-
-        status_t release();
-
-        status_t setTransform(int transform);
-
-        // Get stream parameters.
-        // Only valid after a successful connectToDevice call.
-        int      getId() const     { return mId; }
-        uint32_t getWidth() const  { return mWidth; }
-        uint32_t getHeight() const { return mHeight; }
-        uint32_t getFormat() const { return mFormat; }
-
-        // Dump stream information
-        status_t dump(int fd, const Vector<String16>& args);
-
-      private:
-        enum {
-            ERROR = -1,
-            RELEASED = 0,
-            ALLOCATED,
-            CONNECTED,
-            ACTIVE
-        } mState;
-
-        sp<ANativeWindow> mConsumerInterface;
-        camera2_device_t *mHal2Device;
-
-        uint32_t mId;
-        uint32_t mWidth;
-        uint32_t mHeight;
-        uint32_t mFormat;
-        size_t   mSize;
-        uint32_t mUsage;
-        uint32_t mMaxProducerBuffers;
-        uint32_t mMaxConsumerBuffers;
-        uint32_t mTotalBuffers;
-        int mFormatRequested;
-
-        /** Debugging information */
-        uint32_t mActiveBuffers;
-        uint32_t mFrameCount;
-        int64_t  mLastTimestamp;
-
-        const camera2_stream_ops *getStreamOps();
-
-        static ANativeWindow* toANW(const camera2_stream_ops_t *w);
-
-        static int dequeue_buffer(const camera2_stream_ops_t *w,
-                buffer_handle_t** buffer);
-
-        static int enqueue_buffer(const camera2_stream_ops_t* w,
-                int64_t timestamp,
-                buffer_handle_t* buffer);
-
-        static int cancel_buffer(const camera2_stream_ops_t* w,
-                buffer_handle_t* buffer);
-
-        static int set_crop(const camera2_stream_ops_t* w,
-                int left, int top, int right, int bottom);
-    }; // class StreamAdapter
-
-    typedef List<sp<StreamAdapter> > StreamList;
-    StreamList mStreams;
-
-    /**
-     * Adapter from an existing output stream to camera2 device reprocess
-     * stream input ops. Also takes care of allocating/deallocating the
-     * reprocess stream in the device interface.
-     */
-    class ReprocessStreamAdapter: public camera2_stream_in_ops, public virtual RefBase {
-      public:
-        ReprocessStreamAdapter(camera2_device_t *d);
-
-        ~ReprocessStreamAdapter();
-
-        /**
-         * Create a HAL device reprocess stream based on an existing output stream.
-         */
-        status_t connectToDevice(const sp<StreamAdapter> &outputStream);
-
-        status_t release();
-
-        /**
-         * Push buffer into stream for reprocessing. Takes ownership until it notifies
-         * that the buffer has been released
-         */
-        status_t pushIntoStream(buffer_handle_t *handle,
-                const wp<BufferReleasedListener> &releaseListener);
-
-        /**
-         * Get stream parameters.
-         * Only valid after a successful connectToDevice call.
-         */
-        int      getId() const     { return mId; }
-        uint32_t getWidth() const  { return mWidth; }
-        uint32_t getHeight() const { return mHeight; }
-        uint32_t getFormat() const { return mFormat; }
-
-        // Dump stream information
-        status_t dump(int fd, const Vector<String16>& args);
-
-      private:
-        enum {
-            ERROR = -1,
-            RELEASED = 0,
-            ACTIVE
-        } mState;
-
-        sp<ANativeWindow> mConsumerInterface;
-        wp<StreamAdapter> mBaseStream;
-
-        struct QueueEntry {
-            buffer_handle_t *handle;
-            wp<BufferReleasedListener> releaseListener;
-        };
-
-        List<QueueEntry> mQueue;
-
-        List<QueueEntry> mInFlightQueue;
-
-        camera2_device_t *mHal2Device;
-
-        uint32_t mId;
-        uint32_t mWidth;
-        uint32_t mHeight;
-        uint32_t mFormat;
-
-        /** Debugging information */
-        uint32_t mActiveBuffers;
-        uint32_t mFrameCount;
-        int64_t  mLastTimestamp;
-
-        const camera2_stream_in_ops *getStreamOps();
-
-        static int acquire_buffer(const camera2_stream_in_ops_t *w,
-                buffer_handle_t** buffer);
-
-        static int release_buffer(const camera2_stream_in_ops_t* w,
-                buffer_handle_t* buffer);
-
-    }; // class ReprocessStreamAdapter
-
-    typedef List<sp<ReprocessStreamAdapter> > ReprocessStreamList;
-    ReprocessStreamList mReprocessStreams;
-
-    // Receives HAL notifications and routes them to the NotificationListener
-    static void notificationCallback(int32_t msg_type,
-            int32_t ext1,
-            int32_t ext2,
-            int32_t ext3,
-            void *user);
-
-}; // class Camera2Device
-
-}; // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
new file mode 100644
index 0000000..1f01144
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
@@ -0,0 +1,534 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Camera3-BufferManager"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#include <gui/ISurfaceComposer.h>
+#include <private/gui/ComposerService.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "utils/CameraTraces.h"
+#include "Camera3BufferManager.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3BufferManager::Camera3BufferManager(const sp<IGraphicBufferAlloc>& allocator) :
+        mAllocator(allocator) {
+    if (allocator == NULL) {
+        sp<ISurfaceComposer> composer(ComposerService::getComposerService());
+        mAllocator = composer->createGraphicBufferAlloc();
+        if (mAllocator == NULL) {
+            ALOGE("createGraphicBufferAlloc failed");
+        }
+    }
+}
+
+Camera3BufferManager::~Camera3BufferManager() {
+}
+
+status_t Camera3BufferManager::registerStream(wp<Camera3OutputStream>& stream,
+        const StreamInfo& streamInfo) {
+    ATRACE_CALL();
+
+    int streamId = streamInfo.streamId;
+    int streamSetId = streamInfo.streamSetId;
+
+    if (streamId == CAMERA3_STREAM_ID_INVALID || streamSetId == CAMERA3_STREAM_SET_ID_INVALID) {
+        ALOGE("%s: Stream id (%d) or stream set id (%d) is invalid",
+                __FUNCTION__, streamId, streamSetId);
+        return BAD_VALUE;
+    }
+    if (streamInfo.totalBufferCount > kMaxBufferCount || streamInfo.totalBufferCount == 0) {
+        ALOGE("%s: Stream id (%d) with stream set id (%d) total buffer count %zu is invalid",
+                __FUNCTION__, streamId, streamSetId, streamInfo.totalBufferCount);
+        return BAD_VALUE;
+    }
+    if (!streamInfo.isConfigured) {
+        ALOGE("%s: Stream (%d) is not configured", __FUNCTION__, streamId);
+        return BAD_VALUE;
+    }
+
+    // For Gralloc v1, try to allocate a buffer and see if it succeeds; otherwise, stream
+    // buffer sharing for this newly added stream is not supported. For Gralloc v0, we don't
+    // need to check this: the buffers are not actually shared between streams, since they are
+    // allocated for each stream individually, and any allocation failure will be caught by the
+    // getBufferForStream() call.
+    if (mGrallocVersion > HARDWARE_DEVICE_API_VERSION(0,1)) {
+        // TODO: To be implemented.
+
+        // In case allocation fails, return invalid operation
+        return INVALID_OPERATION;
+    }
+
+    Mutex::Autolock l(mLock);
+    if (mAllocator == NULL) {
+        ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    // Check if this stream was registered with a different stream set ID; if so, error out.
+    for (size_t i = 0; i < mStreamSetMap.size(); i++) {
+        ssize_t streamIdx = mStreamSetMap[i].streamInfoMap.indexOfKey(streamId);
+        if (streamIdx != NAME_NOT_FOUND &&
+            mStreamSetMap[i].streamInfoMap[streamIdx].streamSetId != streamInfo.streamSetId) {
+            ALOGE("%s: It is illegal to register the same stream id with different stream set",
+                    __FUNCTION__);
+            return BAD_VALUE;
+        }
+    }
+    // Check if there is an existing stream set registered; if not, create one; otherwise, add this
+    // stream info to the existing stream set entry.
+    ssize_t setIdx = mStreamSetMap.indexOfKey(streamSetId);
+    if (setIdx == NAME_NOT_FOUND) {
+        ALOGV("%s: stream set %d is not registered to stream set map yet, create it.",
+                __FUNCTION__, streamSetId);
+        // Create stream info map, then add to mStreamSetMap.
+        StreamSet newStreamSet;
+        setIdx = mStreamSetMap.add(streamSetId, newStreamSet);
+    }
+    // Update stream set map and water mark.
+    StreamSet& currentStreamSet = mStreamSetMap.editValueAt(setIdx);
+    ssize_t streamIdx = currentStreamSet.streamInfoMap.indexOfKey(streamId);
+    if (streamIdx != NAME_NOT_FOUND) {
+        ALOGW("%s: stream %d was already registered with stream set %d",
+                __FUNCTION__, streamId, streamSetId);
+        return OK;
+    }
+    currentStreamSet.streamInfoMap.add(streamId, streamInfo);
+    currentStreamSet.handoutBufferCountMap.add(streamId, 0);
+    currentStreamSet.attachedBufferCountMap.add(streamId, 0);
+    mStreamMap.add(streamId, stream);
+
+    // The max allowed buffer count should be the max of the buffer counts of the streams
+    // inside a stream set.
+    if (streamInfo.totalBufferCount > currentStreamSet.maxAllowedBufferCount) {
+        currentStreamSet.maxAllowedBufferCount = streamInfo.totalBufferCount;
+    }
+
+    return OK;
+}
+
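registerStream() sets the per-set cap (maxAllowedBufferCount) to the maximum totalBufferCount over the registered streams, so a set never holds more buffers than its hungriest stream needs. A worked sketch of that bookkeeping (the map contents are made-up example values):

    #include <algorithm>
    #include <cstdio>
    #include <map>

    int main() {
        // streamId -> totalBufferCount, as accumulated by registerStream()
        std::map<int, size_t> totalBufferCount = {{0, 4}, {1, 7}, {2, 5}};

        // The cap is the max over the set, recomputed the same way
        // unregisterStream() does after a removal.
        size_t maxAllowedBufferCount = 0;
        for (const auto& entry : totalBufferCount)
            maxAllowedBufferCount =
                    std::max(maxAllowedBufferCount, entry.second);

        std::printf("maxAllowedBufferCount = %zu\n",
                    maxAllowedBufferCount);   // prints 7
        return 0;
    }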
+status_t Camera3BufferManager::unregisterStream(int streamId, int streamSetId) {
+    ATRACE_CALL();
+
+    Mutex::Autolock l(mLock);
+    ALOGV("%s: unregister stream %d with stream set %d", __FUNCTION__,
+            streamId, streamSetId);
+    if (mAllocator == NULL) {
+        ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    if (!checkIfStreamRegisteredLocked(streamId, streamSetId)) {
+        ALOGE("%s: stream %d with set id %d wasn't properly registered to this buffer manager!",
+                __FUNCTION__, streamId, streamSetId);
+        return BAD_VALUE;
+    }
+
+    // De-list all the buffers associated with this stream first.
+    StreamSet& currentSet = mStreamSetMap.editValueFor(streamSetId);
+    BufferList& freeBufs = currentSet.freeBuffers;
+    BufferCountMap& handOutBufferCounts = currentSet.handoutBufferCountMap;
+    BufferCountMap& attachedBufferCounts = currentSet.attachedBufferCountMap;
+    InfoMap& infoMap = currentSet.streamInfoMap;
+    removeBuffersFromBufferListLocked(freeBufs, streamId);
+    handOutBufferCounts.removeItem(streamId);
+    attachedBufferCounts.removeItem(streamId);
+
+    // Remove the stream info from info map and recalculate the buffer count water mark.
+    infoMap.removeItem(streamId);
+    currentSet.maxAllowedBufferCount = 0;
+    for (size_t i = 0; i < infoMap.size(); i++) {
+        if (infoMap[i].totalBufferCount > currentSet.maxAllowedBufferCount) {
+            currentSet.maxAllowedBufferCount = infoMap[i].totalBufferCount;
+        }
+    }
+    mStreamMap.removeItem(streamId);
+
+    // Lazy solution: when a stream is unregistered, the streams will be reconfigured, so reset
+    // the water mark and let it grow again.
+    currentSet.allocatedBufferWaterMark = 0;
+
+    // Remove this stream set if all its streams have been removed.
+    if (freeBufs.size() == 0 && handOutBufferCounts.size() == 0 && infoMap.size() == 0) {
+        mStreamSetMap.removeItem(streamSetId);
+    }
+
+    return OK;
+}
+
+status_t Camera3BufferManager::getBufferForStream(int streamId, int streamSetId,
+        sp<GraphicBuffer>* gb, int* fenceFd) {
+    ATRACE_CALL();
+
+    Mutex::Autolock l(mLock);
+    ALOGV("%s: get buffer for stream %d with stream set %d", __FUNCTION__,
+            streamId, streamSetId);
+    if (mAllocator == NULL) {
+        ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    if (!checkIfStreamRegisteredLocked(streamId, streamSetId)) {
+        ALOGE("%s: stream %d is not registered with stream set %d yet!!!",
+                __FUNCTION__, streamId, streamSetId);
+        return BAD_VALUE;
+    }
+
+    StreamSet &streamSet = mStreamSetMap.editValueFor(streamSetId);
+    BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
+    size_t& bufferCount = handOutBufferCounts.editValueFor(streamId);
+    if (bufferCount >= streamSet.maxAllowedBufferCount) {
+        ALOGE("%s: bufferCount (%zu) exceeds the max allowed buffer count (%zu) of this stream set",
+                __FUNCTION__, bufferCount, streamSet.maxAllowedBufferCount);
+        return INVALID_OPERATION;
+    }
+
+    BufferCountMap& attachedBufferCounts = streamSet.attachedBufferCountMap;
+    size_t& attachedBufferCount = attachedBufferCounts.editValueFor(streamId);
+    if (attachedBufferCount > bufferCount) {
+        // We've already attached more buffers to this stream than we currently have
+        // outstanding, so have the stream just use an already-attached buffer
+        bufferCount++;
+        return ALREADY_EXISTS;
+    }
+    ALOGV("Stream %d set %d: Get buffer for stream: Allocate new", streamId, streamSetId);
+
+    GraphicBufferEntry buffer =
+            getFirstBufferFromBufferListLocked(streamSet.freeBuffers, streamId);
+
+    if (mGrallocVersion < HARDWARE_DEVICE_API_VERSION(1,0)) {
+        // Allocate one if there is no free buffer available.
+        if (buffer.graphicBuffer == nullptr) {
+            const StreamInfo& info = streamSet.streamInfoMap.valueFor(streamId);
+            status_t res = OK;
+            buffer.fenceFd = -1;
+            buffer.graphicBuffer = mAllocator->createGraphicBuffer(
+                    info.width, info.height, info.format, info.combinedUsage, &res);
+            ALOGV("%s: allocating a new graphic buffer (%dx%d, format 0x%x) %p with handle %p",
+                    __FUNCTION__, info.width, info.height, info.format,
+                    buffer.graphicBuffer.get(), buffer.graphicBuffer->handle);
+            if (res != OK) {
+                ALOGE("%s: graphic buffer allocation failed: (error %d %s) ",
+                        __FUNCTION__, res, strerror(-res));
+                return res;
+            }
+            ALOGV("%s: allocation done", __FUNCTION__);
+        }
+
+        // Increase the hand-out and attached buffer counts for tracking purposes.
+        bufferCount++;
+        attachedBufferCount++;
+        // Update the water mark to be the max hand-out buffer count + 1. An additional buffer is
+        // added to reduce the chance of buffer allocation during stream steady state, especially
+        // for cases where one stream is active but the other stream may still request buffers
+        // randomly.
+        if (bufferCount + 1 > streamSet.allocatedBufferWaterMark) {
+            streamSet.allocatedBufferWaterMark = bufferCount + 1;
+        }
+        *gb = buffer.graphicBuffer;
+        *fenceFd = buffer.fenceFd;
+        ALOGV("%s: get buffer (%p) with handle (%p).",
+                __FUNCTION__, buffer.graphicBuffer.get(), buffer.graphicBuffer->handle);
+
+        // Proactively free buffers for other streams if the current number of allocated buffers
+        // exceeds the water mark. This is only for Gralloc V1; for V2, this logic can also be
+        // handled in returnBufferForStream() if we want to free buffers more quickly.
+        // TODO: probably should find out all the inactive stream IDs, and free the first found
+        // buffers for them.
+        StreamId firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
+        if (streamSet.streamInfoMap.size() > 1) {
+            bool freeBufferIsAttached = false;
+            for (size_t i = 0; i < streamSet.streamInfoMap.size(); i++) {
+                firstOtherStreamId = streamSet.streamInfoMap[i].streamId;
+                if (firstOtherStreamId != streamId) {
+
+                    size_t otherBufferCount =
+                            streamSet.handoutBufferCountMap.valueFor(firstOtherStreamId);
+                    size_t otherAttachedBufferCount =
+                            streamSet.attachedBufferCountMap.valueFor(firstOtherStreamId);
+                    if (otherAttachedBufferCount > otherBufferCount) {
+                        freeBufferIsAttached = true;
+                        break;
+                    }
+                    if (hasBufferForStreamLocked(streamSet.freeBuffers, firstOtherStreamId)) {
+                        freeBufferIsAttached = false;
+                        break;
+                    }
+                }
+                firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
+            }
+            if (firstOtherStreamId == CAMERA3_STREAM_ID_INVALID) {
+                return OK;
+            }
+
+            // This will drop the reference to one free buffer, which will effectively free one
+            // buffer (from the free buffer list) for the inactive streams.
+            size_t totalAllocatedBufferCount = streamSet.freeBuffers.size();
+            for (size_t i = 0; i < streamSet.attachedBufferCountMap.size(); i++) {
+                totalAllocatedBufferCount += streamSet.attachedBufferCountMap[i];
+            }
+            if (totalAllocatedBufferCount > streamSet.allocatedBufferWaterMark) {
+                ALOGV("%s: free a buffer from stream %d", __FUNCTION__, firstOtherStreamId);
+                if (freeBufferIsAttached) {
+                    ALOGV("Stream %d: Freeing buffer: detach", firstOtherStreamId);
+                    sp<Camera3OutputStream> stream =
+                            mStreamMap.valueFor(firstOtherStreamId).promote();
+                    if (stream == nullptr) {
+                        ALOGE("%s: unable to promote stream %d to detach buffer", __FUNCTION__,
+                                firstOtherStreamId);
+                        return INVALID_OPERATION;
+                    }
+
+                    // Detach and then drop the buffer.
+                    //
+                    // Need to unlock because the stream may also be calling
+                    // into the buffer manager in parallel to signal buffer
+                    // release, or acquire a new buffer.
+                    {
+                        mLock.unlock();
+                        sp<GraphicBuffer> buffer;
+                        stream->detachBuffer(&buffer, /*fenceFd*/ nullptr);
+                        mLock.lock();
+                    }
+                    size_t& otherAttachedBufferCount =
+                            streamSet.attachedBufferCountMap.editValueFor(firstOtherStreamId);
+                    otherAttachedBufferCount--;
+                } else {
+                    // Droppable buffer is in the free buffer list, grab and drop
+                    getFirstBufferFromBufferListLocked(streamSet.freeBuffers, firstOtherStreamId);
+                }
+            }
+        }
+    } else {
+        // TODO: implement this.
+        return BAD_VALUE;
+    }
+
+    return OK;
+}
+
+status_t Camera3BufferManager::onBufferReleased(int streamId, int streamSetId) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+
+    ALOGV("Stream %d set %d: Buffer released", streamId, streamSetId);
+    if (mAllocator == NULL) {
+        ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    if (!checkIfStreamRegisteredLocked(streamId, streamSetId)){
+        ALOGV("%s: signaling buffer release for an already unregistered stream "
+                "(stream %d with set id %d)", __FUNCTION__, streamId, streamSetId);
+        return OK;
+    }
+
+    if (mGrallocVersion < HARDWARE_DEVICE_API_VERSION(1,0)) {
+        StreamSet& streamSet = mStreamSetMap.editValueFor(streamSetId);
+        BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
+        size_t& bufferCount = handOutBufferCounts.editValueFor(streamId);
+        bufferCount--;
+        ALOGV("%s: Stream %d set %d: Buffer count now %zu", __FUNCTION__, streamId, streamSetId,
+                bufferCount);
+    } else {
+        // TODO: implement gralloc V1 support
+        return BAD_VALUE;
+    }
+
+    return OK;
+}
+
+status_t Camera3BufferManager::returnBufferForStream(int streamId,
+        int streamSetId, const sp<GraphicBuffer>& buffer, int fenceFd) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+    ALOGV_IF(buffer != 0, "%s: return buffer (%p) with handle (%p) for stream %d and stream set %d",
+            __FUNCTION__, buffer.get(), buffer->handle, streamId, streamSetId);
+    if (mAllocator == NULL) {
+        ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    if (!checkIfStreamRegisteredLocked(streamId, streamSetId)){
+        ALOGV("%s: returning buffer for an already unregistered stream (stream %d with set id %d),"
+                "buffer will be dropped right away!", __FUNCTION__, streamId, streamSetId);
+        return OK;
+    }
+
+    if (mGrallocVersion < HARDWARE_DEVICE_API_VERSION(1,0)) {
+        // Add to the freeBuffer list.
+        StreamSet& streamSet = mStreamSetMap.editValueFor(streamSetId);
+        if (buffer != 0) {
+            BufferEntry entry;
+            entry.add(streamId, GraphicBufferEntry(buffer, fenceFd));
+            status_t res = addBufferToBufferListLocked(streamSet.freeBuffers, entry);
+            if (res != OK) {
+                ALOGE("%s: add buffer to free buffer list failed", __FUNCTION__);
+                return res;
+            }
+        }
+
+        // Update the handed out and attached buffer count for this buffer.
+        BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
+        size_t& bufferCount = handOutBufferCounts.editValueFor(streamId);
+        bufferCount--;
+        size_t& attachedBufferCount = streamSet.attachedBufferCountMap.editValueFor(streamId);
+        attachedBufferCount--;
+    } else {
+        // TODO: implement this.
+        return BAD_VALUE;
+    }
+
+    return OK;
+}
+
+void Camera3BufferManager::dump(int fd, const Vector<String16>& args) const {
+    Mutex::Autolock l(mLock);
+
+    (void) args;
+    String8 lines;
+    lines.appendFormat("      Total stream sets: %zu\n", mStreamSetMap.size());
+    for (size_t i = 0; i < mStreamSetMap.size(); i++) {
+        lines.appendFormat("        Stream set %d has below streams:\n", mStreamSetMap.keyAt(i));
+        for (size_t j = 0; j < mStreamSetMap[i].streamInfoMap.size(); j++) {
+            lines.appendFormat("          Stream %d\n", mStreamSetMap[i].streamInfoMap[j].streamId);
+        }
+        lines.appendFormat("          Stream set max allowed buffer count: %zu\n",
+                mStreamSetMap[i].maxAllowedBufferCount);
+        lines.appendFormat("          Stream set buffer count water mark: %zu\n",
+                mStreamSetMap[i].allocatedBufferWaterMark);
+        lines.appendFormat("          Handout buffer counts:\n");
+        for (size_t m = 0; m < mStreamSetMap[i].handoutBufferCountMap.size(); m++) {
+            int streamId = mStreamSetMap[i].handoutBufferCountMap.keyAt(m);
+            size_t bufferCount = mStreamSetMap[i].handoutBufferCountMap.valueAt(m);
+            lines.appendFormat("            stream id: %d, buffer count: %zu.\n",
+                    streamId, bufferCount);
+        }
+        lines.appendFormat("          Attached buffer counts:\n");
+        for (size_t m = 0; m < mStreamSetMap[i].attachedBufferCountMap.size(); m++) {
+            int streamId = mStreamSetMap[i].attachedBufferCountMap.keyAt(m);
+            size_t bufferCount = mStreamSetMap[i].attachedBufferCountMap.valueAt(m);
+            lines.appendFormat("            stream id: %d, attached buffer count: %zu.\n",
+                    streamId, bufferCount);
+        }
+
+        lines.appendFormat("          Free buffer count: %zu\n",
+                mStreamSetMap[i].freeBuffers.size());
+        for (auto& bufEntry : mStreamSetMap[i].freeBuffers) {
+            for (size_t m = 0; m < bufEntry.size(); m++) {
+                const sp<GraphicBuffer>& buffer = bufEntry.valueAt(m).graphicBuffer;
+                int streamId = bufEntry.keyAt(m);
+                lines.appendFormat("            stream id: %d, buffer: %p, handle: %p.\n",
+                        streamId, buffer.get(), buffer->handle);
+            }
+        }
+    }
+    write(fd, lines.string(), lines.size());
+}
+
+bool Camera3BufferManager::checkIfStreamRegisteredLocked(int streamId, int streamSetId) const {
+    ssize_t setIdx = mStreamSetMap.indexOfKey(streamSetId);
+    if (setIdx == NAME_NOT_FOUND) {
+        ALOGV("%s: stream set %d is not registered to stream set map yet!",
+                __FUNCTION__, streamSetId);
+        return false;
+    }
+
+    ssize_t streamIdx = mStreamSetMap.valueAt(setIdx).streamInfoMap.indexOfKey(streamId);
+    if (streamIdx == NAME_NOT_FOUND) {
+        ALOGV("%s: stream %d is not registered to stream info map yet!", __FUNCTION__, streamId);
+        return false;
+    }
+
+    size_t bufferWaterMark = mStreamSetMap[setIdx].maxAllowedBufferCount;
+    if (bufferWaterMark == 0 || bufferWaterMark > kMaxBufferCount) {
+        ALOGW("%s: stream %d with stream set %d is not registered correctly to stream set map,"
+                " as the water mark (%zu) is wrong!",
+                __FUNCTION__, streamId, streamSetId, bufferWaterMark);
+        return false;
+    }
+
+    return true;
+}
+
+status_t Camera3BufferManager::addBufferToBufferListLocked(BufferList& bufList,
+        const BufferEntry& buffer) {
+    // TODO: add some sanity checks here.
+    bufList.push_back(buffer);
+
+    return OK;
+}
+
+status_t Camera3BufferManager::removeBuffersFromBufferListLocked(BufferList& bufferList,
+        int streamId) {
+    BufferList::iterator i = bufferList.begin();
+    while (i != bufferList.end()) {
+        ssize_t idx = i->indexOfKey(streamId);
+        if (idx != NAME_NOT_FOUND) {
+            ALOGV("%s: Remove a buffer for stream %d, free buffer total count: %zu",
+                    __FUNCTION__, streamId, bufferList.size());
+            i->removeItem(streamId);
+            if (i->isEmpty()) {
+                i = bufferList.erase(i);
+            }
+        } else {
+            i++;
+        }
+    }
+
+    return OK;
+}
+
+bool Camera3BufferManager::hasBufferForStreamLocked(BufferList& buffers, int streamId) {
+    BufferList::iterator i = buffers.begin();
+    while (i != buffers.end()) {
+        ssize_t idx = i->indexOfKey(streamId);
+        if (idx != NAME_NOT_FOUND) {
+            return true;
+        }
+        i++;
+    }
+
+    return false;
+}
+
+Camera3BufferManager::GraphicBufferEntry Camera3BufferManager::getFirstBufferFromBufferListLocked(
+        BufferList& buffers, int streamId) {
+    // Try to get the first buffer from the free buffer list if there is one.
+    GraphicBufferEntry entry;
+    BufferList::iterator i = buffers.begin();
+    while (i != buffers.end()) {
+        ssize_t idx = i->indexOfKey(streamId);
+        if (idx != NAME_NOT_FOUND) {
+            entry = GraphicBufferEntry(i->valueAt(idx));
+            i = buffers.erase(i);
+            break;
+        } else {
+            i++;
+        }
+    }
+
+    ALOGV_IF(entry.graphicBuffer == 0, "%s: Unable to find free buffer for stream %d",
+            __FUNCTION__, streamId);
+    return entry;
+}
+
+} // namespace camera3
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.h b/services/camera/libcameraservice/device3/Camera3BufferManager.h
new file mode 100644
index 0000000..ab6541e
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.h
@@ -0,0 +1,339 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_BUFFER_MANAGER_H
+#define ANDROID_SERVERS_CAMERA3_BUFFER_MANAGER_H
+
+#include <list>
+#include <algorithm>
+#include <ui/GraphicBuffer.h>
+#include <utils/RefBase.h>
+#include <utils/KeyedVector.h>
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+struct StreamInfo;
+class Camera3OutputStream;
+
+/**
+ * A class managing the graphic buffers that are used by camera output streams. It allocates and
+ * hands out Gralloc buffers to clients (e.g., Camera3OutputStream) on request.
+ * When a client requests a buffer, the buffer manager reuses an already allocated buffer if one
+ * is available, and allocates a new one otherwise. When too many allocated buffers are maintained
+ * by the buffer manager, it dynamically deallocates buffers that are solely owned by it.
+ * In doing so, it keeps the memory footprint close to minimal without impacting performance.
+ *
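+ * A hedged usage sketch (illustrative only; streamId, streamSetId, stream, and
+ * streamInfo are assumptions of the example, not fixed API values):
+ *
+ *   sp<Camera3BufferManager> manager = new Camera3BufferManager();
+ *   manager->registerStream(stream, streamInfo);   // stream must be CONFIGURED
+ *   sp<GraphicBuffer> gb;
+ *   int fenceFd = -1;
+ *   manager->getBufferForStream(streamId, streamSetId, &gb, &fenceFd);
+ *   // ... hand gb to the HAL, queue it to the Surface, wait for its release ...
+ *   manager->returnBufferForStream(streamId, streamSetId, gb, fenceFd);
+ *   manager->unregisterStream(streamId, streamSetId);
+ *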
+ */
+class Camera3BufferManager: public virtual RefBase {
+public:
+    Camera3BufferManager(const sp<IGraphicBufferAlloc>& allocator = NULL);
+
+    virtual ~Camera3BufferManager();
+
+    /**
+     * This method registers an output stream to this buffer manager by using the provided stream
+     * information.
+     *
+     * The stream info includes the necessary information such as stream size, format, buffer count,
+     * usage flags, etc. for the buffer manager to allocate and hand out buffers for this stream.
+     *
+     * It's illegal to call this method if the stream is not CONFIGURED yet, as some critical
+     * stream properties (e.g., combined usage flags) are only available in this state. It is also
+     * illegal to call this method with an invalid stream set ID (CAMERA3_STREAM_SET_ID_INVALID),
+     * as an invalid stream set ID indicates that this stream doesn't intend to use the buffer
+     * manager.
+     *
+     * Once a stream is successfully registered to this buffer manager, the buffer manager takes
+     * over the buffer allocation role and provides buffers to this stream via getBufferForStream().
+     * The returned buffer can be sent to the camera HAL for image output, and then queued to the
+     * ANativeWindow (Surface) for the downstream consumer to acquire. Once the image buffer is
+     * released by the consumer endpoint, the BufferQueueProducer onBufferReleased callback will
+     * call returnBufferForStream() to return the free buffer to this buffer manager. If the stream
+     * uses the buffer manager to manage its buffers, it should disable BufferQueue allocation
+     * via IGraphicBufferProducer::allowAllocation(false).
+     *
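+     * A hedged sketch of the per-buffer flow (the callback wiring is an assumption
+     * about how a stream integrates with its BufferQueue):
+     *
+     *   1. getBufferForStream() and attach the buffer to the stream's BufferQueue;
+     *   2. queue the filled buffer to the Surface for the consumer;
+     *   3. on the producer's onBufferReleased callback, either leave the buffer
+     *      attached (see onBufferReleased() below) or detach it and call
+     *      returnBufferForStream() to put it back on the free list.
+     *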
+     * Registering an already registered stream has no effect.
+     *
+     * Return values:
+     *
+     *  OK:                Registration of the new stream was successful.
+     *  BAD_VALUE:         This stream is not at CONFIGURED state, or the stream ID or stream set
+     *                     ID are invalid, or attempting to register the same stream to multiple
+     *                     stream sets, or other stream properties are invalid.
+     *  INVALID_OPERATION: This buffer manager doesn't support buffer sharing across this stream
+     *                     and other streams that were already registered with the same stream set
+     *                     ID.
+     */
+    status_t registerStream(wp<Camera3OutputStream>& stream, const StreamInfo &streamInfo);
+
+    /**
+     * This method unregisters a stream from this buffer manager.
+     *
+     * After a stream is unregistered, further getBufferForStream() calls will fail for this stream.
+     * After all streams for a given stream set are unregistered, all the buffers solely owned (for
+     * this stream set) by this buffer manager will be freed; all buffers subsequently returned to
+     * this buffer manager for this stream set will be freed immediately.
+     *
+     * Return values:
+     *
+     *  OK:        Removal of the stream from this buffer manager was successful.
+     *  BAD_VALUE: stream ID or stream set ID are invalid, or stream ID and stream set ID
+     *             combination doesn't match what was registered, or this stream wasn't registered
+     *             to this buffer manager before.
+     */
+    status_t unregisterStream(int streamId, int streamSetId);
+
+    /**
+     * This method obtains a buffer for a stream from this buffer manager.
+     *
+     * This method returns the first free buffer from the free buffer list (associated with this
+     * stream set) if there is any. Otherwise, it will allocate a buffer for this stream, return
+     * it and increment its count of handed-out buffers. When the total number of allocated buffers
+     * is too high, it may deallocate unused buffers to reduce the memory footprint of this stream
+     * set.
+     *
+     * After this call, the client takes over the ownership of this buffer if it is not freed.
+     *
+     * Return values:
+     *
+     *  OK:        Getting buffer for this stream was successful.
+     *  ALREADY_EXISTS: Enough free buffers are already attached to this output buffer queue; the
+     *             caller should just dequeue from the buffer queue.
+     *  BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID
+     *             combination doesn't match what was registered, or this stream wasn't registered
+     *             to this buffer manager before.
+     *  NO_MEMORY: Unable to allocate a buffer for this stream at this time.
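+     *
+     * A minimal caller sketch (manager, streamId and streamSetId are assumptions
+     * of the example):
+     *
+     *   sp<GraphicBuffer> gb;
+     *   int fenceFd = -1;
+     *   status_t res = manager->getBufferForStream(streamId, streamSetId, &gb, &fenceFd);
+     *   if (res == OK) {
+     *       // New or recycled buffer: attach it to this stream's BufferQueue.
+     *   } else if (res == ALREADY_EXISTS) {
+     *       // Enough buffers are attached already: dequeue from the BufferQueue.
+     *   }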
+     */
+    status_t getBufferForStream(int streamId, int streamSetId, sp<GraphicBuffer>* gb, int* fenceFd);
+
+    /**
+     * This method notifies the manager that a buffer has been released by the consumer.
+     *
+     * The buffer is not returned to the buffer manager, but is available for the stream the buffer
+     * is attached to for dequeuing.
+     *
+     * The notification lets the manager know how many buffers are directly available to the stream.
+     *
+     * If onBufferReleased is called for a given released buffer,
+     * returnBufferForStream may not be called for the same buffer until the
+     * buffer has been reused. If the manager needs the released buffer, it will
+     * instead call detachBuffer on the stream.
+     *
+     * Return values:
+     *
+     *  OK:        Buffer release was processed successfully.
+     *  BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID
+     *             combination doesn't match what was registered, or this stream wasn't registered
+     *             to this buffer manager before.
+     */
+    status_t onBufferReleased(int streamId, int streamSetId);
+
+    /**
+     * This method returns a buffer for a stream to this buffer manager.
+     *
+     * When a buffer is returned, it is treated as a free buffer and may either be reused for future
+     * getBufferForStream() calls, or freed if the total number of outstanding allocated buffers
+     * is too large. The latter only applies to the case where buffers are physically shared
+     * between streams in the same stream set. A physically shared buffer is a buffer that has
+     * one physical backing store but multiple handles: multiple streams can access the same
+     * physical memory with their own handles. Physically shared buffers are only supported by
+     * Gralloc HAL V1.
+     * See hardware/libhardware/include/hardware/gralloc1.h for more details.
+     *
+     * This call takes ownership of the returned buffer if it was allocated by this buffer
+     * manager; clients should not use this buffer after this call. Attempting to access this
+     * buffer after this call results in undefined behavior, and holding a reference to it may
+     * cause memory leakage. If a BufferQueue is used to track the buffers handed out by this
+     * buffer manager, it is recommended to call detachNextBuffer() from the buffer queue after
+     * the BufferQueueProducer onBufferReleased callback fires, and return the buffer to this
+     * buffer manager.
+     *
+     * Return values:
+     *
+     *  OK:        Buffer return for this stream was successful.
+     *  BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID combination
+     *             doesn't match what was registered, or this stream wasn't registered to this
+     *             buffer manager before.
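+     *
+     * A hedged sketch of the recommended return path (producer is a hypothetical
+     * IGraphicBufferProducer for this stream; fence handling is simplified):
+     *
+     *   // In the producer's onBufferReleased callback:
+     *   sp<GraphicBuffer> gb;
+     *   sp<Fence> fence;
+     *   if (producer->detachNextBuffer(&gb, &fence) == NO_ERROR) {
+     *       manager->returnBufferForStream(streamId, streamSetId, gb,
+     *               fence != nullptr ? fence->dup() : -1);
+     *   }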
+     */
+    status_t returnBufferForStream(int streamId, int streamSetId, const sp<GraphicBuffer>& buffer,
+            int fenceFd);
+
+    /**
+     * Dump the buffer manager statistics.
+     */
+    void     dump(int fd, const Vector<String16> &args) const;
+
+private:
+    /**
+     * Lock to synchronize the access to the methods of this class.
+     */
+    mutable Mutex mLock;
+
+    static const size_t kMaxBufferCount = BufferQueueDefs::NUM_BUFFER_SLOTS;
+
+    /**
+     * mAllocator is the connection to SurfaceFlinger that is used to allocate new GraphicBuffer
+     * objects.
+     */
+    sp<IGraphicBufferAlloc> mAllocator;
+
+    struct GraphicBufferEntry {
+        sp<GraphicBuffer> graphicBuffer;
+        int fenceFd;
+        GraphicBufferEntry(const sp<GraphicBuffer>& gb = 0, int fd = -1) :
+            graphicBuffer(gb),
+            fenceFd(fd) {}
+    };
+
+    /**
+     * A buffer entry (indexed by stream ID) represents a single physically allocated buffer. For
+     * Gralloc V0, since each physical buffer is associated with one stream, this is
+     * a single-entry map. For Gralloc V1, one physical buffer can be shared between different
+     * streams in one stream set, so this map may include multiple entries, where the different
+     * graphic buffers share the same common Gralloc backing store.
+     */
+    typedef int StreamId;
+    typedef KeyedVector<StreamId, GraphicBufferEntry> BufferEntry;
+
+    typedef std::list<BufferEntry> BufferList;
+
+    /**
+     * Stream info map (indexed by stream ID) tracks all the streams registered to a particular
+     * stream set.
+     */
+    typedef KeyedVector<StreamId, StreamInfo> InfoMap;
+
+    /**
+     * Stream set buffer count map (indexed by stream ID) tracks all buffer counts of the streams
+     * registered to a particular stream set.
+     */
+    typedef KeyedVector<StreamId, size_t> BufferCountMap;
+
+    /**
+     * StreamSet keeps track of the stream info, free buffer list and hand-out buffer counts for
+     * each stream set.
+     */
+    struct StreamSet {
+        /**
+         * Stream set buffer count water mark representing the max number of allocated buffers
+         * (hand-out buffers + free buffers) count for each stream set. For a given stream set, when
+         * getBufferForStream() is called on this buffer manager, if the total allocated buffer
+         * count exceeds this water mark, the buffer manager will attempt to reduce it as follows:
+         *
+         * In getBufferForStream(), find a buffer associated with another stream (inside the same
+         * stream set) on the free buffer list and free it. For Gralloc V1, it can just free the
+         * head of the free buffer list, if physical buffer sharing is supported in this stream set.
+         *
+         * For a particular stream set, a larger allocatedBufferWaterMark increases the memory
+         * footprint of the stream set, but reduces the chance that getBufferForStream() will have
+         * to allocate a new buffer. Since we assume that the streams in one stream set do not
+         * stream simultaneously, the max allocated buffer count water mark for a stream set will
+         * be the max of all streams' total buffer counts. This avoids new buffer allocations in
+         * the steady streaming state.
+         *
+         * This water mark can be dynamically changed, and will grow when the hand-out buffer count
+         * of each stream increases, until it reaches the maxAllowedBufferCount.
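+         *
+         * For example (hypothetical counts): a preview stream with 4 buffers and a
+         * still-capture stream with 6 buffers in the same set give
+         * maxAllowedBufferCount = 6; the water mark grows with hand-outs up to 6,
+         * and allocations beyond it trigger freeing of the inactive stream's buffers.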
+         */
+        size_t allocatedBufferWaterMark;
+
+        /**
+         * The max allowed buffer count for this stream set. It is the max of each stream's total
+         * buffer count. This is the upper bound of the allocatedBufferWaterMark.
+         */
+        size_t maxAllowedBufferCount;
+
+        /**
+         * The stream info for all streams in this set
+         */
+        InfoMap streamInfoMap;
+        /**
+         * The free buffer list for all the buffers belonging to this set. The free buffers are
+         * returned by the returnBufferForStream() call, and are available for reuse.
+         */
+        BufferList freeBuffers;
+        /**
+         * The count of the buffers that were handed out to the streams of this set.
+         */
+        BufferCountMap handoutBufferCountMap;
+        /**
+         * The count of the buffers that are attached to the streams of this set.
+         * An attached buffer may be free or handed out.
+         */
+        BufferCountMap attachedBufferCountMap;
+
+        StreamSet() {
+            allocatedBufferWaterMark = 0;
+            maxAllowedBufferCount = 0;
+        }
+    };
+
+    /**
+     * Stream set map managed by this buffer manager.
+     */
+    typedef int StreamSetId;
+    KeyedVector<StreamSetId, StreamSet> mStreamSetMap;
+    KeyedVector<StreamId, wp<Camera3OutputStream>> mStreamMap;
+
+    // TODO: There is no easy way to query the Gralloc version in this code yet. We have different
+    // code paths for different Gralloc versions, so hardcode something here for now.
+    const uint32_t mGrallocVersion = GRALLOC_DEVICE_API_VERSION_0_1;
+
+    /**
+     * Check if this stream was successfully registered already. This method needs to be called with
+     * mLock held.
+     */
+    bool checkIfStreamRegisteredLocked(int streamId, int streamSetId) const;
+
+    /**
+     * Add a buffer entry to the BufferList. This method needs to be called with mLock held.
+     */
+    status_t addBufferToBufferListLocked(BufferList &bufList, const BufferEntry &buffer);
+
+    /**
+     * Remove all buffers from the BufferList.
+     *
+     * Note that this doesn't mean that the buffers are freed after this call. A buffer is freed
+     * only if all other references to it are dropped.
+     *
+     * This method needs to be called with mLock held.
+     */
+    status_t removeBuffersFromBufferListLocked(BufferList &bufList, int streamId);
+
+    /**
+     * Get the first available buffer from the buffer list for this stream. The graphicBuffer
+     * inside this entry will be NULL if no GraphicBufferEntry is found. If a GraphicBufferEntry
+     * is found, it is removed from the BufferList by this call.
+     *
+     * This method needs to be called with mLock held.
+     *
+     */
+    GraphicBufferEntry getFirstBufferFromBufferListLocked(BufferList& buffers, int streamId);
+
+    /**
+     * Check if there is any buffer associated with this stream in the given buffer list.
+     *
+     * This method needs to be called with mLock held.
+     *
+     */
+    inline bool hasBufferForStreamLocked(BufferList& buffers, int streamId);
+};
+
+} // namespace camera3
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA3_BUFFER_MANAGER_H
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 8b43154..96f9338 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -43,7 +43,8 @@
 #include <utils/Trace.h>
 #include <utils/Timers.h>
 
-#include "CameraService.h"
+#include <android/hardware/camera2/ICameraDeviceUser.h>
+
 #include "utils/CameraTraces.h"
 #include "mediautils/SchedulingPolicyService.h"
 #include "device3/Camera3Device.h"
@@ -65,6 +66,7 @@
         mStatusWaiters(0),
         mUsePartialResult(false),
         mNumPartialResults(1),
+        mTimestampOffset(0),
         mNextResultFrameNumber(0),
         mNextReprocessResultFrameNumber(0),
         mNextShutterFrameNumber(0),
@@ -132,8 +134,7 @@
     }
 
     camera_info info;
-    res = CameraService::filterGetInfoErrorCode(module->getCameraInfo(
-        mId, &info));
+    res = module->getCameraInfo(mId, &info);
     if (res != OK) return res;
 
     if (info.device_version != device->common.version) {
@@ -168,6 +169,9 @@
         return res;
     }
 
+    /** Create buffer manager */
+    mBufferManager = new Camera3BufferManager();
+
     bool aeLockAvailable = false;
     camera_metadata_ro_entry aeLockAvailableEntry;
     res = find_camera_metadata_ro_entry(info.static_camera_characteristics,
@@ -196,12 +200,28 @@
     mDeviceInfo = info.static_camera_characteristics;
     mHal3Device = device;
 
+    // Determine whether we need to derive sensitivity boost values for older devices.
+    // If the post-RAW sensitivity boost range is listed, the post-RAW sensitivity boost control
+    // should be listed as well (with the default value 100).
+    if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_4 &&
+            mDeviceInfo.exists(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE)) {
+        mDerivePostRawSensKey = true;
+    }
+
     internalUpdateStatusLocked(STATUS_UNCONFIGURED);
     mNextStreamId = 0;
     mDummyStreamId = NO_STREAM;
     mNeedConfig = true;
     mPauseStateNotify = false;
 
+    // Measure the clock domain offset between camera and video/hw_composer
+    camera_metadata_entry timestampSource =
+            mDeviceInfo.find(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE);
+    if (timestampSource.count > 0 && timestampSource.data.u8[0] ==
+            ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME) {
+        mTimestampOffset = getMonoToBoottimeOffset();
+    }
+
     // Will the HAL be sending in early partial result metadata?
     if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
         camera_metadata_entry partialResultsCount =
@@ -294,6 +314,7 @@
 
         mRequestThread.clear();
         mStatusTracker.clear();
+        mBufferManager.clear();
 
         hal3Device = mHal3Device;
     }
@@ -379,6 +400,49 @@
     return Size(maxJpegWidth, maxJpegHeight);
 }
 
+nsecs_t Camera3Device::getMonoToBoottimeOffset() {
+    // try three times to get the clock offset, choose the one
+    // with the minimum gap in measurements.
+    const int tries = 3;
+    nsecs_t bestGap, measured;
+    for (int i = 0; i < tries; ++i) {
+        const nsecs_t tmono = systemTime(SYSTEM_TIME_MONOTONIC);
+        const nsecs_t tbase = systemTime(SYSTEM_TIME_BOOTTIME);
+        const nsecs_t tmono2 = systemTime(SYSTEM_TIME_MONOTONIC);
+        const nsecs_t gap = tmono2 - tmono;
+        if (i == 0 || gap < bestGap) {
+            bestGap = gap;
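+            // Midpoint of the two monotonic reads approximates the instant the
+            // boottime clock was sampled; a smaller gap gives a better estimate.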
+            measured = tbase - ((tmono + tmono2) >> 1);
+        }
+    }
+    return measured;
+}
+
+/**
+ * Map Android N dataspace definitions back to Android M definitions, for
+ * use with HALv3.3 or older.
+ *
+ * Only map where correspondences exist, and otherwise preserve the value.
+ */
+android_dataspace Camera3Device::mapToLegacyDataspace(android_dataspace dataSpace) {
+    switch (dataSpace) {
+        case HAL_DATASPACE_V0_SRGB_LINEAR:
+            return HAL_DATASPACE_SRGB_LINEAR;
+        case HAL_DATASPACE_V0_SRGB:
+            return HAL_DATASPACE_SRGB;
+        case HAL_DATASPACE_V0_JFIF:
+            return HAL_DATASPACE_JFIF;
+        case HAL_DATASPACE_V0_BT601_625:
+            return HAL_DATASPACE_BT601_625;
+        case HAL_DATASPACE_V0_BT601_525:
+            return HAL_DATASPACE_BT601_525;
+        case HAL_DATASPACE_V0_BT709:
+            return HAL_DATASPACE_BT709;
+        default:
+            return dataSpace;
+    }
+}
+
 ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
     // Get max jpeg size (area-wise).
     Size maxJpegResolution = getMaxJpegResolution();
@@ -423,7 +487,31 @@
     return maxBytesForPointCloud;
 }
 
+ssize_t Camera3Device::getRawOpaqueBufferSize(int32_t width, int32_t height) const {
+    const int PER_CONFIGURATION_SIZE = 3;
+    const int WIDTH_OFFSET = 0;
+    const int HEIGHT_OFFSET = 1;
+    const int SIZE_OFFSET = 2;
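+    // The static metadata is a flat list of (width, height, size) triples; e.g. a
+    // hypothetical entry {4000, 3000, 24000000} maps a 4000x3000 opaque RAW
+    // configuration to a 24 MB buffer.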
+    camera_metadata_ro_entry rawOpaqueSizes =
+        mDeviceInfo.find(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
+    size_t count = rawOpaqueSizes.count;
+    if (count == 0 || (count % PER_CONFIGURATION_SIZE)) {
+        ALOGE("%s: Camera %d: bad opaque RAW size static metadata length(%zu)!",
+                __FUNCTION__, mId, count);
+        return BAD_VALUE;
+    }
 
+    for (size_t i = 0; i < count; i += PER_CONFIGURATION_SIZE) {
+        if (width == rawOpaqueSizes.data.i32[i + WIDTH_OFFSET] &&
+                height == rawOpaqueSizes.data.i32[i + HEIGHT_OFFSET]) {
+            return rawOpaqueSizes.data.i32[i + SIZE_OFFSET];
+        }
+    }
+
+    ALOGE("%s: Camera %d: cannot find size for %dx%d opaque RAW image!",
+            __FUNCTION__, mId, width, height);
+    return BAD_VALUE;
+}
 
 status_t Camera3Device::dump(int fd, const Vector<String16> &args) {
     ATRACE_CALL();
@@ -441,6 +529,15 @@
             "Camera %d: %s: Unable to lock main lock, proceeding anyway",
             mId, __FUNCTION__);
 
+    bool dumpTemplates = false;
+    String16 templatesOption("-t");
+    int n = args.size();
+    for (int i = 0; i < n; i++) {
+        if (args[i] == templatesOption) {
+            dumpTemplates = true;
+        }
+    }
+
     String8 lines;
 
     const char *status =
@@ -470,6 +567,12 @@
         mOutputStreams[i]->dump(fd,args);
     }
 
+    if (mBufferManager != NULL) {
+        lines = String8("    Camera3 Buffer Manager:\n");
+        write(fd, lines.string(), lines.size());
+        mBufferManager->dump(fd, args);
+    }
+
     lines = String8("    In-flight requests:\n");
     if (mInFlightMap.size() == 0) {
         lines.append("      None\n");
@@ -492,6 +595,33 @@
         lastRequest.dump(fd, /*verbosity*/2, /*indentation*/6);
     }
 
+    if (dumpTemplates) {
+        const char *templateNames[] = {
+            "TEMPLATE_PREVIEW",
+            "TEMPLATE_STILL_CAPTURE",
+            "TEMPLATE_VIDEO_RECORD",
+            "TEMPLATE_VIDEO_SNAPSHOT",
+            "TEMPLATE_ZERO_SHUTTER_LAG",
+            "TEMPLATE_MANUAL"
+        };
+
+        for (int i = 1; i < CAMERA3_TEMPLATE_COUNT; i++) {
+            const camera_metadata_t *templateRequest;
+            templateRequest =
+                mHal3Device->ops->construct_default_request_settings(
+                    mHal3Device, i);
+            lines = String8::format("    HAL Request %s:\n", templateNames[i-1]);
+            if (templateRequest == NULL) {
+                lines.append("       Not supported\n");
+                write(fd, lines.string(), lines.size());
+            } else {
+                write(fd, lines.string(), lines.size());
+                dump_indented_camera_metadata(templateRequest,
+                        fd, /*verbosity*/2, /*indentation*/8);
+            }
+        }
+    }
+
     if (mHal3Device != NULL) {
         lines = String8("    HAL device dump:\n");
         write(fd, lines.string(), lines.size());
@@ -664,19 +794,12 @@
 
     if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
         res = configureStreamsLocked();
-        // Stream configuration failed due to unsupported configuration.
-        // Device back to unconfigured state. Client might try other configuraitons
-        if (res == BAD_VALUE && mStatus == STATUS_UNCONFIGURED) {
-            CLOGE("No streams configured");
-            return NULL;
-        }
-        // Stream configuration failed for other reason. Fatal.
+        // Stream configuration failed. Client might try other configurations.
         if (res != OK) {
-            SET_ERR_L("Can't set up streams: %s (%d)", strerror(-res), res);
+            CLOGE("Can't set up streams: %s (%d)", strerror(-res), res);
             return NULL;
-        }
-        // Stream configuration successfully configure to empty stream configuration.
-        if (mStatus == STATUS_UNCONFIGURED) {
+        } else if (mStatus == STATUS_UNCONFIGURED) {
+            // Stream configuration succeeded, but with an empty stream configuration.
             CLOGE("No streams configured");
             return NULL;
         }
@@ -867,7 +990,7 @@
 
 status_t Camera3Device::createStream(sp<Surface> consumer,
         uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
-        camera3_stream_rotation_t rotation, int *id) {
+        camera3_stream_rotation_t rotation, int *id, int streamSetId) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
@@ -904,6 +1027,15 @@
     assert(mStatus != STATUS_ACTIVE);
 
     sp<Camera3OutputStream> newStream;
+    // Overwrite stream set id to invalid for HAL3.2 or lower, as the buffer manager does not
+    // support such devices.
+    if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_2) {
+        streamSetId = CAMERA3_STREAM_SET_ID_INVALID;
+    }
+    // Use legacy dataspace values for older HALs
+    if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_3) {
+        dataSpace = mapToLegacyDataspace(dataSpace);
+    }
     if (format == HAL_PIXEL_FORMAT_BLOB) {
         ssize_t blobBufferSize;
         if (dataSpace != HAL_DATASPACE_DEPTH) {
@@ -920,13 +1052,34 @@
             }
         }
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
-                width, height, blobBufferSize, format, dataSpace, rotation);
+                width, height, blobBufferSize, format, dataSpace, rotation,
+                mTimestampOffset, streamSetId);
+    } else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
+        ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(width, height);
+        if (rawOpaqueBufferSize <= 0) {
+            SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
+            return BAD_VALUE;
+        }
+        newStream = new Camera3OutputStream(mNextStreamId, consumer,
+                width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
+                mTimestampOffset, streamSetId);
     } else {
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
-                width, height, format, dataSpace, rotation);
+                width, height, format, dataSpace, rotation,
+                mTimestampOffset, streamSetId);
     }
     newStream->setStatusTracker(mStatusTracker);
 
+    /**
+     * The Camera3 buffer manager is only supported from HAL3.3 onwards, as older HALs (< HAL3.2)
+     * require buffers to be statically allocated for internal static buffer registration, while
+     * the buffers provided by the buffer manager are dynamically allocated. For HAL3.2, because
+     * not all HAL implementations support dynamic buffer registration, exclude it as well.
+     */
+    if (mDeviceVersion > CAMERA_DEVICE_API_VERSION_3_2) {
+        newStream->setBufferManager(mBufferManager);
+    }
+
     res = mOutputStreams.add(mNextStreamId, newStream);
     if (res < 0) {
         SET_ERR_L("Can't add new stream to set: %s (%d)", strerror(-res), res);
@@ -1119,7 +1272,7 @@
 
     if (templateId <= 0 || templateId >= CAMERA3_TEMPLATE_COUNT) {
         android_errorWriteWithInfoLog(CameraService::SN_EVENT_LOG_ID, "26866110",
-                IPCThreadState::self()->getCallingUid(), NULL, 0);
+                IPCThreadState::self()->getCallingUid(), nullptr, 0);
         return BAD_VALUE;
     }
 
@@ -1158,9 +1311,19 @@
               __FUNCTION__, templateId);
         return BAD_VALUE;
     }
-    *request = rawRequest;
+
     mRequestTemplateCache[templateId] = rawRequest;
 
+    // Derive some new keys for backward compatibility
+    if (mDerivePostRawSensKey && !mRequestTemplateCache[templateId].exists(
+            ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST)) {
+        int32_t defaultBoost[1] = {100};
+        mRequestTemplateCache[templateId].update(
+                ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST,
+                defaultBoost, 1);
+    }
+
+    *request = mRequestTemplateCache[templateId];
     return OK;
 }
 
@@ -1500,6 +1663,26 @@
     return stream->tearDown();
 }
 
+status_t Camera3Device::addBufferListenerForStream(int streamId,
+        wp<Camera3StreamBufferListener> listener) {
+    ATRACE_CALL();
+    ALOGV("%s: Camera %d: Adding buffer listener for stream %d", __FUNCTION__, mId, streamId);
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    sp<Camera3StreamInterface> stream;
+    ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
+    if (outputStreamIdx == NAME_NOT_FOUND) {
+        CLOGE("Stream %d does not exist", streamId);
+        return BAD_VALUE;
+    }
+
+    stream = mOutputStreams.editValueAt(outputStreamIdx);
+    stream->addBufferListener(listener);
+
+    return OK;
+}
+
 uint32_t Camera3Device::getDeviceVersion() {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
@@ -1633,6 +1816,33 @@
     return false;
 }
 
+void Camera3Device::cancelStreamsConfigurationLocked() {
+    int res = OK;
+    if (mInputStream != NULL && mInputStream->isConfiguring()) {
+        res = mInputStream->cancelConfiguration();
+        if (res != OK) {
+            CLOGE("Can't cancel configuring input stream %d: %s (%d)",
+                    mInputStream->getId(), strerror(-res), res);
+        }
+    }
+
+    for (size_t i = 0; i < mOutputStreams.size(); i++) {
+        sp<Camera3OutputStreamInterface> outputStream = mOutputStreams.editValueAt(i);
+        if (outputStream->isConfiguring()) {
+            res = outputStream->cancelConfiguration();
+            if (res != OK) {
+                CLOGE("Can't cancel configuring output stream %d: %s (%d)",
+                        outputStream->getId(), strerror(-res), res);
+            }
+        }
+    }
+
+    // Return state to that at start of call, so that future configures
+    // properly clean things up
+    internalUpdateStatusLocked(STATUS_UNCONFIGURED);
+    mNeedConfig = true;
+}
+
 status_t Camera3Device::configureStreamsLocked() {
     ATRACE_CALL();
     status_t res;
@@ -1672,7 +1882,8 @@
         camera3_stream_t *inputStream;
         inputStream = mInputStream->startConfiguration();
         if (inputStream == NULL) {
-            SET_ERR_L("Can't start input stream configuration");
+            CLOGE("Can't start input stream configuration");
+            cancelStreamsConfigurationLocked();
             return INVALID_OPERATION;
         }
         streams.add(inputStream);
@@ -1691,7 +1902,8 @@
         camera3_stream_t *outputStream;
         outputStream = mOutputStreams.editValueAt(i)->startConfiguration();
         if (outputStream == NULL) {
-            SET_ERR_L("Can't start output stream configuration");
+            CLOGE("Can't start output stream configuration");
+            cancelStreamsConfigurationLocked();
             return INVALID_OPERATION;
         }
         streams.add(outputStream);
@@ -1708,35 +1920,8 @@
     if (res == BAD_VALUE) {
         // HAL rejected this set of streams as unsupported, clean up config
         // attempt and return to unconfigured state
-        if (mInputStream != NULL && mInputStream->isConfiguring()) {
-            res = mInputStream->cancelConfiguration();
-            if (res != OK) {
-                SET_ERR_L("Can't cancel configuring input stream %d: %s (%d)",
-                        mInputStream->getId(), strerror(-res), res);
-                return res;
-            }
-        }
-
-        for (size_t i = 0; i < mOutputStreams.size(); i++) {
-            sp<Camera3OutputStreamInterface> outputStream =
-                    mOutputStreams.editValueAt(i);
-            if (outputStream->isConfiguring()) {
-                res = outputStream->cancelConfiguration();
-                if (res != OK) {
-                    SET_ERR_L(
-                        "Can't cancel configuring output stream %d: %s (%d)",
-                        outputStream->getId(), strerror(-res), res);
-                    return res;
-                }
-            }
-        }
-
-        // Return state to that at start of call, so that future configures
-        // properly clean things up
-        internalUpdateStatusLocked(STATUS_UNCONFIGURED);
-        mNeedConfig = true;
-
-        ALOGV("%s: Camera %d: Stream configuration failed", __FUNCTION__, mId);
+        CLOGE("Set of requested inputs/outputs not supported by HAL");
+        cancelStreamsConfigurationLocked();
         return BAD_VALUE;
     } else if (res != OK) {
         // Some other kind of error from configure_streams - this is not
@@ -1753,9 +1938,10 @@
     if (mInputStream != NULL && mInputStream->isConfiguring()) {
         res = mInputStream->finishConfiguration(mHal3Device);
         if (res != OK) {
-            SET_ERR_L("Can't finish configuring input stream %d: %s (%d)",
+            CLOGE("Can't finish configuring input stream %d: %s (%d)",
                     mInputStream->getId(), strerror(-res), res);
-            return res;
+            cancelStreamsConfigurationLocked();
+            return BAD_VALUE;
         }
     }
 
@@ -1765,22 +1951,23 @@
         if (outputStream->isConfiguring()) {
             res = outputStream->finishConfiguration(mHal3Device);
             if (res != OK) {
-                SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+                CLOGE("Can't finish configuring output stream %d: %s (%d)",
                         outputStream->getId(), strerror(-res), res);
-                return res;
+                cancelStreamsConfigurationLocked();
+                return BAD_VALUE;
             }
         }
     }
 
     // Request thread needs to know to avoid using repeat-last-settings protocol
     // across configure_streams() calls
-    mRequestThread->configurationComplete();
+    mRequestThread->configurationComplete(mIsConstrainedHighSpeedConfiguration);
 
     // Boost priority of request thread for high speed recording to SCHED_FIFO
     if (mIsConstrainedHighSpeedConfiguration) {
         pid_t requestThreadTid = mRequestThread->getTid();
         res = requestPriority(getpid(), requestThreadTid,
-                kConstrainedHighSpeedThreadPriority, true);
+                kConstrainedHighSpeedThreadPriority, /*asynchronous*/ false);
         if (res != OK) {
             ALOGW("Can't set realtime priority for request processing thread: %s (%d)",
                     strerror(-res), res);
@@ -1908,7 +2095,7 @@
 
     // Notify upstream about a device error
     if (mListener != NULL) {
-        mListener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
+        mListener->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
                 CaptureResultExtras());
     }
 
@@ -1935,176 +2122,6 @@
     return OK;
 }
 
-/**
- * Check if all 3A fields are ready, and send off a partial 3A-only result
- * to the output frame queue
- */
-bool Camera3Device::processPartial3AResult(
-        uint32_t frameNumber,
-        const CameraMetadata& partial, const CaptureResultExtras& resultExtras) {
-
-    // Check if all 3A states are present
-    // The full list of fields is
-    //   android.control.afMode
-    //   android.control.awbMode
-    //   android.control.aeState
-    //   android.control.awbState
-    //   android.control.afState
-    //   android.control.afTriggerID
-    //   android.control.aePrecaptureID
-    // TODO: Add android.control.aeMode
-
-    bool gotAllStates = true;
-
-    uint8_t afMode;
-    uint8_t awbMode;
-    uint8_t aeState;
-    uint8_t afState;
-    uint8_t awbState;
-
-    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_MODE,
-        &afMode, frameNumber);
-
-    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_MODE,
-        &awbMode, frameNumber);
-
-    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_STATE,
-        &aeState, frameNumber);
-
-    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_STATE,
-        &afState, frameNumber);
-
-    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_STATE,
-        &awbState, frameNumber);
-
-    if (!gotAllStates) return false;
-
-    ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, "
-        "AF state %d, AE state %d, AWB state %d, "
-        "AF trigger %d, AE precapture trigger %d",
-        __FUNCTION__, mId, frameNumber, resultExtras.requestId,
-        afMode, awbMode,
-        afState, aeState, awbState,
-        resultExtras.afTriggerId, resultExtras.precaptureTriggerId);
-
-    // Got all states, so construct a minimal result to send
-    // In addition to the above fields, this means adding in
-    //   android.request.frameCount
-    //   android.request.requestId
-    //   android.quirks.partialResult (for HAL version below HAL3.2)
-
-    const size_t kMinimal3AResultEntries = 10;
-
-    Mutex::Autolock l(mOutputLock);
-
-    CaptureResult captureResult;
-    captureResult.mResultExtras = resultExtras;
-    captureResult.mMetadata = CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0);
-    // TODO: change this to sp<CaptureResult>. This will need other changes, including,
-    // but not limited to CameraDeviceBase::getNextResult
-    CaptureResult& min3AResult =
-            *mResultQueue.insert(mResultQueue.end(), captureResult);
-
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_FRAME_COUNT,
-            // TODO: This is problematic casting. Need to fix CameraMetadata.
-            reinterpret_cast<int32_t*>(&frameNumber), frameNumber)) {
-        return false;
-    }
-
-    int32_t requestId = resultExtras.requestId;
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_ID,
-            &requestId, frameNumber)) {
-        return false;
-    }
-
-    if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
-        static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
-        if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
-                &partialResult, frameNumber)) {
-            return false;
-        }
-    }
-
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_MODE,
-            &afMode, frameNumber)) {
-        return false;
-    }
-
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_MODE,
-            &awbMode, frameNumber)) {
-        return false;
-    }
-
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_STATE,
-            &aeState, frameNumber)) {
-        return false;
-    }
-
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_STATE,
-            &afState, frameNumber)) {
-        return false;
-    }
-
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_STATE,
-            &awbState, frameNumber)) {
-        return false;
-    }
-
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_TRIGGER_ID,
-            &resultExtras.afTriggerId, frameNumber)) {
-        return false;
-    }
-
-    if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
-            &resultExtras.precaptureTriggerId, frameNumber)) {
-        return false;
-    }
-
-    // We only send the aggregated partial when all 3A related metadata are available
-    // For both API1 and API2.
-    // TODO: we probably should pass through all partials to API2 unconditionally.
-    mResultSignal.signal();
-
-    return true;
-}
-
-template<typename T>
-bool Camera3Device::get3AResult(const CameraMetadata& result, int32_t tag,
-        T* value, uint32_t frameNumber) {
-    (void) frameNumber;
-
-    camera_metadata_ro_entry_t entry;
-
-    entry = result.find(tag);
-    if (entry.count == 0) {
-        ALOGVV("%s: Camera %d: Frame %d: No %s provided by HAL!", __FUNCTION__,
-            mId, frameNumber, get_camera_metadata_tag_name(tag));
-        return false;
-    }
-
-    if (sizeof(T) == sizeof(uint8_t)) {
-        *value = entry.data.u8[0];
-    } else if (sizeof(T) == sizeof(int32_t)) {
-        *value = entry.data.i32[0];
-    } else {
-        ALOGE("%s: Unexpected type", __FUNCTION__);
-        return false;
-    }
-    return true;
-}
-
-template<typename T>
-bool Camera3Device::insert3AResult(CameraMetadata& result, int32_t tag,
-        const T* value, uint32_t frameNumber) {
-    if (result.update(tag, value, 1) != NO_ERROR) {
-        mResultQueue.erase(--mResultQueue.end(), mResultQueue.end());
-        SET_ERR("Frame %d: Failed to set %s in partial metadata",
-                frameNumber, get_camera_metadata_tag_name(tag));
-        return false;
-    }
-    return true;
-}
-
 void Camera3Device::returnOutputBuffers(
         const camera3_stream_buffer_t *outputBuffers, size_t numBuffers,
         nsecs_t timestamp) {
@@ -2172,6 +2189,48 @@
     }
 }
 
+void Camera3Device::insertResultLocked(CaptureResult *result, uint32_t frameNumber,
+            const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+    if (result == nullptr) return;
+
+    if (result->mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
+            (int32_t*)&frameNumber, 1) != OK) {
+        SET_ERR("Failed to set frame number %d in metadata", frameNumber);
+        return;
+    }
+
+    if (result->mMetadata.update(ANDROID_REQUEST_ID, &result->mResultExtras.requestId, 1) != OK) {
+        SET_ERR("Failed to set request ID in metadata for frame %d", frameNumber);
+        return;
+    }
+
+    overrideResultForPrecaptureCancel(&result->mMetadata, aeTriggerCancelOverride);
+
+    // Valid result, insert into queue
+    List<CaptureResult>::iterator queuedResult =
+            mResultQueue.insert(mResultQueue.end(), CaptureResult(*result));
+    ALOGVV("%s: result requestId = %" PRId32 ", frameNumber = %" PRId64
+           ", burstId = %" PRId32, __FUNCTION__,
+           queuedResult->mResultExtras.requestId,
+           queuedResult->mResultExtras.frameNumber,
+           queuedResult->mResultExtras.burstId);
+
+    mResultSignal.signal();
+}
+
+
+void Camera3Device::sendPartialCaptureResult(const camera_metadata_t * partialResult,
+        const CaptureResultExtras &resultExtras, uint32_t frameNumber,
+        const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+    Mutex::Autolock l(mOutputLock);
+
+    CaptureResult captureResult;
+    captureResult.mResultExtras = resultExtras;
+    captureResult.mMetadata = partialResult;
+
+    insertResultLocked(&captureResult, frameNumber, aeTriggerCancelOverride);
+}
+
 
 void Camera3Device::sendCaptureResult(CameraMetadata &pendingMetadata,
         CaptureResultExtras &resultExtras,
@@ -2207,44 +2266,31 @@
     captureResult.mResultExtras = resultExtras;
     captureResult.mMetadata = pendingMetadata;
 
-    if (captureResult.mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
-            (int32_t*)&frameNumber, 1) != OK) {
-        SET_ERR("Failed to set frame# in metadata (%d)",
-                frameNumber);
-        return;
-    } else {
-        ALOGVV("%s: Camera %d: Set frame# in metadata (%d)",
-                __FUNCTION__, mId, frameNumber);
-    }
-
     // Append any previous partials to form a complete result
     if (mUsePartialResult && !collectedPartialResult.isEmpty()) {
         captureResult.mMetadata.append(collectedPartialResult);
     }
 
+    // Derive some new keys for backward compatibility
+    if (mDerivePostRawSensKey && !captureResult.mMetadata.exists(
+            ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST)) {
+        int32_t defaultBoost[1] = {100};
+        captureResult.mMetadata.update(
+                ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST,
+                defaultBoost, 1);
+    }
+
     captureResult.mMetadata.sort();
 
     // Check that there's a timestamp in the result metadata
-    camera_metadata_entry entry =
-            captureResult.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
+    camera_metadata_entry entry = captureResult.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
     if (entry.count == 0) {
         SET_ERR("No timestamp provided by HAL for frame %d!",
                 frameNumber);
         return;
     }
 
-    overrideResultForPrecaptureCancel(&captureResult.mMetadata, aeTriggerCancelOverride);
-
-    // Valid result, insert into queue
-    List<CaptureResult>::iterator queuedResult =
-            mResultQueue.insert(mResultQueue.end(), CaptureResult(captureResult));
-    ALOGVV("%s: result requestId = %" PRId32 ", frameNumber = %" PRId64
-           ", burstId = %" PRId32, __FUNCTION__,
-           queuedResult->mResultExtras.requestId,
-           queuedResult->mResultExtras.frameNumber,
-           queuedResult->mResultExtras.burstId);
-
-    mResultSignal.signal();
+    insertResultLocked(&captureResult, frameNumber, aeTriggerCancelOverride);
 }
 
 /**
@@ -2321,7 +2367,7 @@
                 }
                 isPartialResult = (result->partial_result < mNumPartialResults);
                 if (isPartialResult) {
-                    request.partialResult.collectedResult.append(result->result);
+                    request.collectedPartialResult.append(result->result);
                 }
             } else {
                 camera_metadata_ro_entry_t partialResultEntry;
@@ -2334,21 +2380,17 @@
                     // A partial result. Flag this as such, and collect this
                     // set of metadata into the in-flight entry.
                     isPartialResult = true;
-                    request.partialResult.collectedResult.append(
+                    request.collectedPartialResult.append(
                         result->result);
-                    request.partialResult.collectedResult.erase(
+                    request.collectedPartialResult.erase(
                         ANDROID_QUIRKS_PARTIAL_RESULT);
                 }
             }
 
             if (isPartialResult) {
-                // Fire off a 3A-only result if possible
-                if (!request.partialResult.haveSent3A) {
-                    request.partialResult.haveSent3A =
-                            processPartial3AResult(frameNumber,
-                                    request.partialResult.collectedResult,
-                                    request.resultExtras);
-                }
+                // Send partial capture result
+                sendPartialCaptureResult(result->result, request.resultExtras, frameNumber,
+                        request.aeTriggerCancelOverride);
             }
         }
 
@@ -2363,9 +2405,9 @@
                 return;
             }
             if (mUsePartialResult &&
-                    !request.partialResult.collectedResult.isEmpty()) {
+                    !request.collectedPartialResult.isEmpty()) {
                 collectedPartialResult.acquire(
-                    request.partialResult.collectedResult);
+                    request.collectedPartialResult);
             }
             request.haveResultMetadata = true;
         }
@@ -2408,7 +2450,7 @@
         if (result->result != NULL && !isPartialResult) {
             if (shutterTimestamp == 0) {
                 request.pendingMetadata = result->result;
-                request.partialResult.collectedResult = collectedPartialResult;
+                request.collectedPartialResult = collectedPartialResult;
             } else {
                 CameraMetadata metadata;
                 metadata = result->result;
@@ -2474,25 +2516,24 @@
 
     // Map camera HAL error codes to ICameraDeviceCallback error codes
     // Index into this with the HAL error code
-    static const ICameraDeviceCallbacks::CameraErrorCode
-            halErrorMap[CAMERA3_MSG_NUM_ERRORS] = {
+    static const int32_t halErrorMap[CAMERA3_MSG_NUM_ERRORS] = {
         // 0 = Unused error code
-        ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR,
+        hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR,
         // 1 = CAMERA3_MSG_ERROR_DEVICE
-        ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
+        hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
         // 2 = CAMERA3_MSG_ERROR_REQUEST
-        ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+        hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
         // 3 = CAMERA3_MSG_ERROR_RESULT
-        ICameraDeviceCallbacks::ERROR_CAMERA_RESULT,
+        hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT,
         // 4 = CAMERA3_MSG_ERROR_BUFFER
-        ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER
+        hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER
     };
 
-    ICameraDeviceCallbacks::CameraErrorCode errorCode =
+    int32_t errorCode =
             ((msg.error_code >= 0) &&
                     (msg.error_code < CAMERA3_MSG_NUM_ERRORS)) ?
             halErrorMap[msg.error_code] :
-            ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
+            hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
 
     int streamId = 0;
     if (msg.error_stream != NULL) {
@@ -2506,13 +2547,13 @@
 
     CaptureResultExtras resultExtras;
     switch (errorCode) {
-        case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
             // SET_ERR calls notifyError
             SET_ERR("Camera HAL reported serious device error");
             break;
-        case ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
-        case ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
-        case ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+        case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
             {
                 Mutex::Autolock l(mInFlightLock);
                 ssize_t idx = mInFlightMap.indexOfKey(msg.frame_number);
@@ -2527,6 +2568,7 @@
                             resultExtras.frameNumber);
                 }
             }
+            resultExtras.errorStreamId = streamId;
             if (listener != NULL) {
                 listener->notifyError(errorCode, resultExtras);
             } else {
@@ -2587,7 +2629,7 @@
 
             // send pending result and buffers
             sendCaptureResult(r.pendingMetadata, r.resultExtras,
-                r.partialResult.collectedResult, msg.frame_number,
+                r.collectedPartialResult, msg.frame_number,
                 r.hasInputBuffer, r.aeTriggerCancelOverride);
             returnOutputBuffers(r.pendingOutputBuffers.array(),
                 r.pendingOutputBuffers.size(), r.shutterTimestamp);
@@ -2628,6 +2670,7 @@
         mParent(parent),
         mStatusTracker(statusTracker),
         mHal3Device(hal3Device),
+        mListener(nullptr),
         mId(getId(parent)),
         mReconfigured(false),
         mDoPause(false),
@@ -2636,8 +2679,10 @@
         mLatestRequestId(NAME_NOT_FOUND),
         mCurrentAfTriggerId(0),
         mCurrentPreCaptureTriggerId(0),
-        mRepeatingLastFrameNumber(NO_IN_FLIGHT_REPEATING_FRAMES),
-        mAeLockAvailable(aeLockAvailable) {
+        mRepeatingLastFrameNumber(
+            hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES),
+        mAeLockAvailable(aeLockAvailable),
+        mPrepareVideoStream(false) {
     mStatusId = statusTracker->addComponent();
 }
 
@@ -2647,9 +2692,11 @@
     mListener = listener;
 }
 
-void Camera3Device::RequestThread::configurationComplete() {
+void Camera3Device::RequestThread::configurationComplete(bool isConstrainedHighSpeed) {
     Mutex::Autolock l(mRequestLock);
     mReconfigured = true;
+    // Prepare video stream for high speed recording.
+    mPrepareVideoStream = isConstrainedHighSpeed;
 }
 
 status_t Camera3Device::RequestThread::queueRequestList(
@@ -2744,7 +2791,7 @@
 
     unpauseForNewRequests();
 
-    mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES;
+    mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
     return OK;
 }
 
@@ -2761,11 +2808,16 @@
 
 status_t Camera3Device::RequestThread::clearRepeatingRequests(/*out*/int64_t *lastFrameNumber) {
     Mutex::Autolock l(mRequestLock);
+    return clearRepeatingRequestsLocked(lastFrameNumber);
+}
+
+status_t Camera3Device::RequestThread::clearRepeatingRequestsLocked(/*out*/int64_t *lastFrameNumber) {
     mRepeatingRequests.clear();
     if (lastFrameNumber != NULL) {
         *lastFrameNumber = mRepeatingLastFrameNumber;
     }
-    mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES;
+    mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
     return OK;
 }
 
@@ -2802,7 +2854,7 @@
             // The requestId and burstId fields were set when the request was
             // submitted originally (in convertMetadataListToRequestListLocked)
             (*it)->mResultExtras.frameNumber = mFrameNumber++;
-            listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+            listener->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
                     (*it)->mResultExtras);
         }
     }
@@ -2811,7 +2863,7 @@
     if (lastFrameNumber != NULL) {
         *lastFrameNumber = mRepeatingLastFrameNumber;
     }
-    mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES;
+    mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
     return OK;
 }
 
@@ -2915,6 +2967,31 @@
     }
 }
 
+void Camera3Device::RequestThread::checkAndStopRepeatingRequest() {
+    bool surfaceAbandoned = false;
+    int64_t lastFrameNumber = 0;
+    {
+        Mutex::Autolock l(mRequestLock);
+        // Check that all streams needed by the repeating requests are still valid. Otherwise,
+        // stop the repeating requests.
+        for (const auto& request : mRepeatingRequests) {
+            for (const auto& s : request->mOutputStreams) {
+                if (s->isAbandoned()) {
+                    surfaceAbandoned = true;
+                    clearRepeatingRequestsLocked(&lastFrameNumber);
+                    break;
+                }
+            }
+            if (surfaceAbandoned) {
+                break;
+            }
+        }
+    }
+    if (surfaceAbandoned && mListener != nullptr) {
+        mListener->notifyRepeatingRequestError(lastFrameNumber);
+    }
+}
+
 bool Camera3Device::RequestThread::threadLoop() {
     ATRACE_CALL();
     status_t res;
@@ -2946,6 +3023,8 @@
     if (res == TIMED_OUT) {
         // Not a fatal error if getting output buffers time out.
         cleanUpFailedRequests(/*sendRequestError*/ true);
+        // Check if any stream is abandoned.
+        checkAndStopRepeatingRequest();
         return true;
     } else if (res != OK) {
         cleanUpFailedRequests(/*sendRequestError*/ false);
@@ -3119,8 +3198,25 @@
                 captureRequest->mOutputStreams.size());
         halRequest->output_buffers = outputBuffers->array();
         for (size_t i = 0; i < captureRequest->mOutputStreams.size(); i++) {
-            res = captureRequest->mOutputStreams.editItemAt(i)->
-                    getBuffer(&outputBuffers->editItemAt(i));
+            sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(i);
+
+            // Prepare video buffers for high speed recording on the first video request.
+            if (mPrepareVideoStream && outputStream->isVideoStream()) {
+                // Only try to prepare video stream on the first video request.
+                mPrepareVideoStream = false;
+
+                res = outputStream->startPrepare(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX);
+                while (res == NOT_ENOUGH_DATA) {
+                    res = outputStream->prepareNextBuffer();
+                }
+                if (res != OK) {
+                    ALOGW("%s: Preparing video buffers for high speed failed: %s (%d)",
+                        __FUNCTION__, strerror(-res), res);
+                    outputStream->cancelPrepare();
+                }
+            }
+
+            res = outputStream->getBuffer(&outputBuffers->editItemAt(i));
             if (res != OK) {
                 // Can't get output buffer from gralloc queue - this could be due to
                 // abandoned queue or other consumer misbehavior, so not a fatal
@@ -3231,7 +3327,7 @@
             Mutex::Autolock l(mRequestLock);
             if (mListener != NULL) {
                 mListener->notifyError(
-                        ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+                        hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
                         captureRequest->mResultExtras);
             }
         }
@@ -3274,7 +3370,7 @@
     }
 
     if (mNextRequests.size() < batchSize) {
-        ALOGE("RequestThread: only get %d out of %d requests. Skipping requests.",
+        ALOGE("RequestThread: only get %zu out of %zu requests. Skipping requests.",
                 mNextRequests.size(), batchSize);
         cleanUpFailedRequests(/*sendRequestError*/true);
     }
@@ -3371,7 +3467,7 @@
                         " %s (%d)", __FUNCTION__, strerror(-res), res);
                 if (mListener != NULL) {
                     mListener->notifyError(
-                            ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+                            hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
                             nextRequest->mResultExtras);
                 }
                 return NULL;
@@ -3594,7 +3690,7 @@
 
 status_t Camera3Device::RequestThread::addDummyTriggerIds(
         const sp<CaptureRequest> &request) {
-    // Trigger ID 0 has special meaning in the HAL2 spec, so avoid it here
+    // Trigger ID 0 had special meaning in the HAL2 spec, so avoid it here
     static const int32_t dummyTriggerId = 1;
     status_t res;
 
@@ -3632,7 +3728,8 @@
  */
 
 Camera3Device::PreparerThread::PreparerThread() :
-        Thread(/*canCallJava*/false), mActive(false), mCancelNow(false) {
+        Thread(/*canCallJava*/false), mListener(nullptr),
+        mActive(false), mCancelNow(false) {
 }
 
 Camera3Device::PreparerThread::~PreparerThread() {
@@ -3688,8 +3785,6 @@
 }
 
 status_t Camera3Device::PreparerThread::clear() {
-    status_t res;
-
     Mutex::Autolock l(mLock);
 
     for (const auto& stream : mPendingStreams) {
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 2cd5af3..2aca57d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -23,12 +23,13 @@
 #include <utils/Mutex.h>
 #include <utils/Thread.h>
 #include <utils/KeyedVector.h>
+#include <utils/Timers.h>
 #include <hardware/camera3.h>
 #include <camera/CaptureResult.h>
-#include <camera/camera2/ICameraDeviceUser.h>
 
 #include "common/CameraDeviceBase.h"
 #include "device3/StatusTracker.h"
+#include "device3/Camera3BufferManager.h"
 
 /**
  * Function pointer types with C calling convention to
@@ -97,7 +98,8 @@
     // stream, reconfiguring device, and unpausing.
     virtual status_t createStream(sp<Surface> consumer,
             uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+            int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID);
     virtual status_t createInputStream(
             uint32_t width, uint32_t height, int format,
             int *id);
@@ -144,12 +146,16 @@
 
     virtual status_t tearDown(int streamId);
 
+    virtual status_t addBufferListenerForStream(int streamId,
+            wp<camera3::Camera3StreamBufferListener> listener);
+
     virtual status_t prepare(int maxCount, int streamId);
 
     virtual uint32_t getDeviceVersion();
 
     virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
     ssize_t getPointCloudBufferSize() const;
+    ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height) const;
 
     // Methods called by subclasses
     void             notifyStatus(bool idle); // updates from StatusTracker
@@ -197,6 +203,10 @@
 
     uint32_t                   mDeviceVersion;
 
+    // whether Camera3Device should derive ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST for
+    // backward compatibility. Should not be changed after initialization.
+    bool                       mDerivePostRawSensKey = false;
+
     struct Size {
         uint32_t width;
         uint32_t height;
@@ -248,6 +258,12 @@
 
     /**** End scope for mLock ****/
 
+    // The offset for converting from the clock domain of another subsystem
+    // (video/hardware composer) to that of the camera. The assumption is that this
+    // offset won't change during the lifetime of the camera device; in other
+    // words, the camera device shouldn't be open across a CPU suspend.
+    nsecs_t                    mTimestampOffset;
+
     typedef struct AeTriggerCancelOverride {
         bool applyAeLock;
         uint8_t aeLock;
@@ -351,6 +367,11 @@
     status_t           configureStreamsLocked();
 
     /**
+     * Cancel stream configuration that did not finish successfully.
+     */
+    void               cancelStreamsConfigurationLocked();
+
+    /**
      * Add a dummy stream to the current stream set as a workaround for
      * not allowing 0 streams in the camera HAL spec.
      */
@@ -389,6 +410,17 @@
      */
     Size getMaxJpegResolution() const;
 
+    /**
+     * Helper function to get the offset between MONOTONIC and BOOTTIME
+     * timestamp.
+     */
+    static nsecs_t getMonoToBoottimeOffset();
+
+    /**
+     * Helper function to map between legacy and new dataspace enums
+     */
+    static android_dataspace mapToLegacyDataspace(android_dataspace dataSpace);
+
     struct RequestTrigger {
         // Metadata tag number, e.g. android.control.aePrecaptureTrigger
         uint32_t metadataTag;
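
As an illustration of the getMonoToBoottimeOffset() helper declared above, here is a
minimal sketch of how such an offset might be computed with the systemTime() helpers
from <utils/Timers.h> (included by this patch). The sampling loop and iteration count
are assumptions for illustration, not the verbatim implementation:

    // Sample both clocks back-to-back a few times, and keep the reading with the
    // smallest gap between the two MONOTONIC samples; that gap bounds the error
    // introduced between the two clock reads.
    static nsecs_t getMonoToBoottimeOffsetSketch() {
        nsecs_t bestGap = 0, offset = 0;
        for (int i = 0; i < 3; i++) {
            const nsecs_t monoBefore = systemTime(SYSTEM_TIME_MONOTONIC);
            const nsecs_t boot       = systemTime(SYSTEM_TIME_BOOTTIME);
            const nsecs_t monoAfter  = systemTime(SYSTEM_TIME_MONOTONIC);
            const nsecs_t gap = monoAfter - monoBefore;
            if (i == 0 || gap < bestGap) {
                bestGap = gap;
                offset = boot - (monoBefore + monoAfter) / 2;
            }
        }
        return offset;
    }

The resulting offset is what Camera3OutputStream later subtracts from BOOTTIME camera
timestamps before handing buffers to MONOTONIC-based consumers (see the
mUseMonoTimestamp handling in Camera3OutputStream.cpp below).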
@@ -423,7 +455,7 @@
         /**
          * Call after stream (re)-configuration is completed.
          */
-        void     configurationComplete();
+        void     configurationComplete(bool isConstrainedHighSpeed);
 
         /**
          * Set or clear the list of repeating requests. Does not block
@@ -538,6 +570,9 @@
         // ERROR state to mark them as not having valid data. mNextRequests will be cleared.
         void cleanUpFailedRequests(bool sendRequestError);
 
+        // Stop the repeating request if any of its output streams is abandoned.
+        void checkAndStopRepeatingRequest();
+
         // Pause handling
         bool               waitIfPaused();
         void               unpauseForNewRequests();
@@ -551,6 +586,9 @@
         // Handle AE precapture trigger cancel for devices <= CAMERA_DEVICE_API_VERSION_3_2.
         void handleAePrecaptureCancelRequest(sp<CaptureRequest> request);
 
+        // Clear repeating requests. Must be called with mRequestLock held.
+        status_t clearRepeatingRequestsLocked(/*out*/ int64_t *lastFrameNumber = NULL);
+
         wp<Camera3Device>  mParent;
         wp<camera3::StatusTracker>  mStatusTracker;
         camera3_device_t  *mHal3Device;
@@ -605,6 +643,9 @@
 
         // Whether the device supports AE lock
         bool               mAeLockAvailable;
+
+        // Flag indicating if we should prepare video stream for video requests.
+        bool               mPrepareVideoStream;
     };
     sp<RequestThread> mRequestThread;
 
@@ -633,6 +674,10 @@
         // receives the shutter event.
         CameraMetadata pendingMetadata;
 
+        // The metadata of the partial results that the framework has received from the
+        // HAL so far, and has sent out.
+        CameraMetadata collectedPartialResult;
+
         // Buffers are added by process_capture_result when output buffers
         // return from HAL but framework has not yet received the shutter
         // event. They will be returned to the streams when framework receives
@@ -643,19 +688,6 @@
         // CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
         AeTriggerCancelOverride_t aeTriggerCancelOverride;
 
-
-        // Fields used by the partial result only
-        struct PartialResultInFlight {
-            // Set by process_capture_result once 3A has been sent to clients
-            bool    haveSent3A;
-            // Result metadata collected so far, when partial results are in use
-            CameraMetadata collectedResult;
-
-            PartialResultInFlight():
-                    haveSent3A(false) {
-            }
-        } partialResult;
-
         // Default constructor needed by KeyedVector
         InFlightRequest() :
                 shutterTimestamp(0),
@@ -691,23 +723,6 @@
             const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
 
     /**
-     * For the partial result, check if all 3A state fields are available
-     * and if so, queue up 3A-only result to the client. Returns true if 3A
-     * is sent.
-     */
-    bool processPartial3AResult(uint32_t frameNumber,
-            const CameraMetadata& partial, const CaptureResultExtras& resultExtras);
-
-    // Helpers for reading and writing 3A metadata into to/from partial results
-    template<typename T>
-    bool get3AResult(const CameraMetadata& result, int32_t tag,
-            T* value, uint32_t frameNumber);
-
-    template<typename T>
-    bool insert3AResult(CameraMetadata &result, int32_t tag, const T* value,
-            uint32_t frameNumber);
-
-    /**
      * Override result metadata for cancelling AE precapture trigger applied in
      * handleAePrecaptureCancelRequest().
      */
@@ -720,6 +735,13 @@
     sp<camera3::StatusTracker> mStatusTracker;
 
     /**
+     * Graphic buffer manager for output streams. Each device has a buffer manager, which is used
+     * by the output streams to get and return buffers if these streams are registered to this
+     * buffer manager.
+     */
+    sp<camera3::Camera3BufferManager> mBufferManager;
+
+    /**
      * Thread for preparing streams
      */
     class PreparerThread : private Thread, public virtual RefBase {
@@ -798,13 +820,24 @@
     void returnOutputBuffers(const camera3_stream_buffer_t *outputBuffers,
             size_t numBuffers, nsecs_t timestamp);
 
-    // Insert the capture result given the pending metadata, result extras,
+    // Send a partial capture result.
+    void sendPartialCaptureResult(const camera_metadata_t * partialResult,
+            const CaptureResultExtras &resultExtras, uint32_t frameNumber,
+            const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+
+    // Send a total capture result given the pending metadata and result extras,
     // partial results, and the frame number to the result queue.
     void sendCaptureResult(CameraMetadata &pendingMetadata,
             CaptureResultExtras &resultExtras,
             CameraMetadata &collectedPartialResult, uint32_t frameNumber,
             bool reprocess, const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
 
+    // Insert the result into the result queue after updating the frame number and applying
+    // the AE precapture trigger cancel override.
+    // mOutputLock must be held when calling this function.
+    void insertResultLocked(CaptureResult *result, uint32_t frameNumber,
+            const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+
     /**** Scope for mInFlightLock ****/
 
     // Remove the in-flight request of the given index from mInFlightMap
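
To make the new result-dispatch split concrete, here is a hedged sketch of how
sendPartialCaptureResult() might wrap insertResultLocked(), assuming the CaptureResult
mMetadata/mResultExtras fields used elsewhere in this patch; illustrative only, not
the verbatim implementation:

    void Camera3Device::sendPartialCaptureResult(const camera_metadata_t * partialResult,
            const CaptureResultExtras &resultExtras, uint32_t frameNumber,
            const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
        Mutex::Autolock l(mOutputLock); // insertResultLocked() requires mOutputLock

        CaptureResult captureResult;
        captureResult.mResultExtras = resultExtras;
        captureResult.mMetadata = partialResult; // copies the HAL-owned metadata

        insertResultLocked(&captureResult, frameNumber, aeTriggerCancelOverride);
    }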
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 1d9d04f..6354ef7 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -36,28 +36,28 @@
 
 }
 
-status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *buffer) {
+status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *) {
     ATRACE_CALL();
-    ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", mId);
+    ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", __FUNCTION__, mId);
     return INVALID_OPERATION;
 }
 
 status_t Camera3DummyStream::returnBufferLocked(
-        const camera3_stream_buffer &buffer,
-        nsecs_t timestamp) {
+        const camera3_stream_buffer &,
+        nsecs_t) {
     ATRACE_CALL();
-    ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+    ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", __FUNCTION__, mId);
     return INVALID_OPERATION;
 }
 
 status_t Camera3DummyStream::returnBufferCheckedLocked(
-            const camera3_stream_buffer &buffer,
-            nsecs_t timestamp,
-            bool output,
+            const camera3_stream_buffer &,
+            nsecs_t,
+            bool,
             /*out*/
-            sp<Fence> *releaseFenceOut) {
+            sp<Fence>*) {
     ATRACE_CALL();
-    ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+    ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", __FUNCTION__, mId);
     return INVALID_OPERATION;
 }
 
@@ -70,12 +70,19 @@
     Camera3IOStreamBase::dump(fd, args);
 }
 
-status_t Camera3DummyStream::setTransform(int transform) {
+status_t Camera3DummyStream::setTransform(int) {
     ATRACE_CALL();
     // Do nothing
     return OK;
 }
 
+status_t Camera3DummyStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
+    (void) buffer;
+    (void) fenceFd;
+    // Do nothing
+    return OK;
+}
+
 status_t Camera3DummyStream::configureQueueLocked() {
     // Do nothing
     return OK;
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 97c0c96..7b48daa 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -54,6 +54,8 @@
 
     status_t         setTransform(int transform);
 
+    virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
+
     /**
      * Return if this output stream is for video encoding.
      */
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 23b1c45..cb39244 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -31,9 +31,9 @@
 
 Camera3IOStreamBase::Camera3IOStreamBase(int id, camera3_stream_type_t type,
         uint32_t width, uint32_t height, size_t maxSize, int format,
-        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation, int setId) :
         Camera3Stream(id, type,
-                width, height, maxSize, format, dataSpace, rotation),
+                width, height, maxSize, format, dataSpace, rotation, setId),
         mTotalBufferCount(0),
         mHandoutTotalBufferCount(0),
         mHandoutOutputBufferCount(0),
@@ -42,7 +42,8 @@
 
     mCombinedFence = new Fence();
 
-    if (maxSize > 0 && format != HAL_PIXEL_FORMAT_BLOB) {
+    if (maxSize > 0 &&
+            (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE)) {
         ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
                 format);
         mState = STATE_ERROR;
@@ -123,6 +124,7 @@
     switch (mState) {
         case STATE_IN_RECONFIG:
         case STATE_CONFIGURED:
+        case STATE_ABANDONED:
             // OK
             break;
         default:
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index f5727e8..35dda39 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -34,7 +34,8 @@
   protected:
     Camera3IOStreamBase(int id, camera3_stream_type_t type,
             uint32_t width, uint32_t height, size_t maxSize, int format,
-            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+            int setId = CAMERA3_STREAM_SET_ID_INVALID);
 
   public:
 
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 2504bfd..f781ded 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -116,6 +116,7 @@
                 bufferFound = true;
                 bufferItem = tmp;
                 mBuffersInFlight.erase(it);
+                break;
             }
         }
     }
@@ -169,7 +170,7 @@
     if (producer == NULL) {
         return BAD_VALUE;
     } else if (mProducer == NULL) {
-        ALOGE("%s: No input stream is configured");
+        ALOGE("%s: No input stream is configured", __FUNCTION__);
         return INVALID_OPERATION;
     }
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 3f0a736..7b72144 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -34,30 +34,41 @@
 Camera3OutputStream::Camera3OutputStream(int id,
         sp<Surface> consumer,
         uint32_t width, uint32_t height, int format,
-        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+        nsecs_t timestampOffset, int setId) :
         Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height,
-                            /*maxSize*/0, format, dataSpace, rotation),
+                            /*maxSize*/0, format, dataSpace, rotation, setId),
         mConsumer(consumer),
         mTransform(0),
-        mTraceFirstBuffer(true) {
+        mTraceFirstBuffer(true),
+        mUseMonoTimestamp(false),
+        mUseBufferManager(false),
+        mTimestampOffset(timestampOffset) {
 
     if (mConsumer == NULL) {
         ALOGE("%s: Consumer is NULL!", __FUNCTION__);
         mState = STATE_ERROR;
     }
+
+    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
+        mBufferReleasedListener = new BufferReleasedListener(this);
+    }
 }
 
 Camera3OutputStream::Camera3OutputStream(int id,
         sp<Surface> consumer,
         uint32_t width, uint32_t height, size_t maxSize, int format,
-        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+        nsecs_t timestampOffset, int setId) :
         Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height, maxSize,
-                            format, dataSpace, rotation),
+                            format, dataSpace, rotation, setId),
         mConsumer(consumer),
         mTransform(0),
-        mTraceFirstBuffer(true) {
+        mTraceFirstBuffer(true),
+        mUseMonoTimestamp(false),
+        mUseBufferManager(false),
+        mTimestampOffset(timestampOffset) {
 
-    if (format != HAL_PIXEL_FORMAT_BLOB) {
+    if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
         ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
                 format);
         mState = STATE_ERROR;
@@ -67,17 +78,29 @@
         ALOGE("%s: Consumer is NULL!", __FUNCTION__);
         mState = STATE_ERROR;
     }
+
+    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
+        mBufferReleasedListener = new BufferReleasedListener(this);
+    }
 }
 
 Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type,
                                          uint32_t width, uint32_t height,
                                          int format,
                                          android_dataspace dataSpace,
-                                         camera3_stream_rotation_t rotation) :
+                                         camera3_stream_rotation_t rotation,
+                                         int setId) :
         Camera3IOStreamBase(id, type, width, height,
                             /*maxSize*/0,
-                            format, dataSpace, rotation),
-        mTransform(0) {
+                            format, dataSpace, rotation, setId),
+        mTransform(0),
+        mTraceFirstBuffer(true),
+        mUseMonoTimestamp(false),
+        mUseBufferManager(false),
+        mTimestampOffset(0) {
+
+    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
+        mBufferReleasedListener = new BufferReleasedListener(this);
+    }
 
     // Subclasses expected to initialize mConsumer themselves
 }
@@ -96,28 +119,64 @@
     }
 
     ANativeWindowBuffer* anb;
-    int fenceFd;
+    int fenceFd = -1;
+    bool gotBufferFromManager = false;
 
-    /**
-     * Release the lock briefly to avoid deadlock for below scenario:
-     * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
-     * This thread acquired StreamingProcessor lock and try to lock Camera3Stream lock.
-     * Thread 2: Camera3Stream::returnBuffer->StreamingProcessor::onFrameAvailable().
-     * This thread acquired Camera3Stream lock and bufferQueue lock, and try to lock
-     * StreamingProcessor lock.
-     * Thread 3: Camera3Stream::getBuffer(). This thread acquired Camera3Stream lock
-     * and try to lock bufferQueue lock.
-     * Then there is circular locking dependency.
-     */
-    sp<ANativeWindow> currentConsumer = mConsumer;
-    mLock.unlock();
+    if (mUseBufferManager) {
+        sp<GraphicBuffer> gb;
+        res = mBufferManager->getBufferForStream(getId(), getStreamSetId(), &gb, &fenceFd);
+        if (res == OK) {
+            // Attach this buffer to the bufferQueue: the buffer will be in the dequeued
+            // state after a successful return.
+            anb = gb.get();
+            res = mConsumer->attachBuffer(anb);
+            if (res != OK) {
+                ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+                return res;
+            }
+            gotBufferFromManager = true;
+            ALOGV("Stream %d: Attached new buffer", getId());
+        } else if (res == ALREADY_EXISTS) {
+            // Have sufficient free buffers already attached, can just
+            // dequeue from buffer queue
+            ALOGV("Stream %d: Reusing attached buffer", getId());
+            gotBufferFromManager = false;
+        } else if (res != OK) {
+            ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager: %s (%d)",
+                    __FUNCTION__, mId, strerror(-res), res);
+            return res;
+        }
+    }
+    if (!gotBufferFromManager) {
+        /**
+         * Release the lock briefly to avoid deadlock for below scenario:
+         * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
+         * This thread acquired StreamingProcessor lock and try to lock Camera3Stream lock.
+         * Thread 2: Camera3Stream::returnBuffer->StreamingProcessor::onFrameAvailable().
+         * This thread acquired Camera3Stream lock and bufferQueue lock, and try to lock
+         * StreamingProcessor lock.
+         * Thread 3: Camera3Stream::getBuffer(). This thread acquired Camera3Stream lock
+         * and try to lock bufferQueue lock.
+         * Then there is circular locking dependency.
+         */
+        sp<ANativeWindow> currentConsumer = mConsumer;
+        mLock.unlock();
 
-    res = currentConsumer->dequeueBuffer(currentConsumer.get(), &anb, &fenceFd);
-    mLock.lock();
-    if (res != OK) {
-        ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
-                __FUNCTION__, mId, strerror(-res), res);
-        return res;
+        res = currentConsumer->dequeueBuffer(currentConsumer.get(), &anb, &fenceFd);
+        mLock.lock();
+        if (res != OK) {
+            ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
+                    __FUNCTION__, mId, strerror(-res), res);
+
+            // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is STATE_PREPARING,
+            // let prepareNextBuffer handle the error.)
+            if (res == NO_INIT && mState == STATE_CONFIGURED) {
+                mState = STATE_ABANDONED;
+            }
+
+            return res;
+        }
     }
 
     /**
@@ -183,6 +242,11 @@
             ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
                   " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
         }
+
+        if (mUseBufferManager) {
+            // Return this buffer back to buffer manager.
+            mBufferReleasedListener->onBufferReleased();
+        }
     } else {
         if (mTraceFirstBuffer && (stream_type == CAMERA3_STREAM_OUTPUT)) {
             {
@@ -193,7 +257,11 @@
             mTraceFirstBuffer = false;
         }
 
-        res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
+        /* Certain consumers (such as AudioSource or HardwareComposer) use
+         * MONOTONIC time, causing time misalignment if the camera timestamp is
+         * in BOOTTIME. Do the conversion if necessary. */
+        res = native_window_set_buffers_timestamp(mConsumer.get(),
+                mUseMonoTimestamp ? timestamp - mTimestampOffset : timestamp);
         if (res != OK) {
             ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
                   __FUNCTION__, mId, strerror(-res), res);
@@ -270,9 +338,9 @@
 
     ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
 
-    // Configure consumer-side ANativeWindow interface
-    res = native_window_api_connect(mConsumer.get(),
-            NATIVE_WINDOW_API_CAMERA);
+    // Configure consumer-side ANativeWindow interface. The listener may be used
+    // to notify the buffer manager (if one is in use) of the returned buffers.
+    res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA, /*listener*/mBufferReleasedListener);
     if (res != OK) {
         ALOGE("%s: Unable to connect to native window for stream %d",
                 __FUNCTION__, mId);
@@ -350,6 +418,7 @@
     mHandoutTotalBufferCount = 0;
     mFrameCount = 0;
     mLastTimestamp = 0;
+    mUseMonoTimestamp = (isConsumedByHWComposer() || isVideoStream());
 
     res = native_window_set_buffer_count(mConsumer.get(),
             mTotalBufferCount);
@@ -366,6 +435,36 @@
                 __FUNCTION__, mTransform, strerror(-res), res);
     }
 
+    /**
+     * The Camera3 buffer manager is only supported by HAL3.3 onwards, as older HALs require
+     * buffers to be statically allocated for internal static buffer registration, while the
+     * buffers provided by the buffer manager are dynamically allocated. Camera3Device only
+     * sets mBufferManager if the device version is > HAL3.2, which guarantees that the buffer
+     * manager setup in the code below is skipped for older devices. Note that HAL3.2 is also
+     * excluded here, as some HAL3.2 devices may not support dynamic buffer registration.
+     */
+    if (mBufferManager != 0 && mSetId > CAMERA3_STREAM_SET_ID_INVALID) {
+        uint32_t consumerUsage = 0;
+        getEndpointUsage(&consumerUsage);
+        StreamInfo streamInfo(
+                getId(), getStreamSetId(), getWidth(), getHeight(), getFormat(), getDataSpace(),
+                camera3_stream::usage | consumerUsage, mTotalBufferCount,
+                /*isConfigured*/true);
+        wp<Camera3OutputStream> weakThis(this);
+        res = mBufferManager->registerStream(weakThis,
+                streamInfo);
+        if (res == OK) {
+            // Disable buffer allocation for this BufferQueue; the buffer manager will take
+            // over the buffer allocation responsibility.
+            mConsumer->getIGraphicBufferProducer()->allowAllocation(false);
+            mUseBufferManager = true;
+        } else {
+            ALOGE("%s: Unable to register stream %d to camera3 buffer manager, "
+                  "(error %d %s), fall back to BufferQueue for buffer management!",
+                  __FUNCTION__, mId, res, strerror(-res));
+        }
+    }
+
     return OK;
 }
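
A hedged sketch of the HAL-version gate described in the comment above, as it might
look on the Camera3Device side; the surrounding control flow is an assumption for
illustration, while CAMERA_DEVICE_API_VERSION_3_3 and the setBufferManager()
precondition come from this patch:

    // In Camera3Device::initialize(): only create the shared buffer manager for
    // HALs that can handle dynamically allocated buffers.
    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_3) {
        mBufferManager = new Camera3BufferManager();
    }

    // In Camera3Device::createStream(): hand the manager to streams created with a
    // valid set id, while they are still in STATE_CONSTRUCTED.
    if (mBufferManager != 0 && streamSetId != camera3::CAMERA3_STREAM_SET_ID_INVALID) {
        newStream->setBufferManager(mBufferManager);
    }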
 
@@ -376,6 +475,8 @@
         return res;
     }
 
+    ALOGV("%s: disconnecting stream %d from native window", __FUNCTION__, getId());
+
     res = native_window_api_disconnect(mConsumer.get(),
                                        NATIVE_WINDOW_API_CAMERA);
 
@@ -396,6 +497,21 @@
         return res;
     }
 
+    // Since the device is already idle, there are no getBuffer calls to the buffer manager,
+    // so unregistering the stream at this point should be safe.
+    if (mUseBufferManager) {
+        res = mBufferManager->unregisterStream(getId(), getStreamSetId());
+        if (res != OK) {
+            ALOGE("%s: Unable to unregister stream %d from buffer manager "
+                    "(error %d %s)", __FUNCTION__, mId, res, strerror(-res));
+            mState = STATE_ERROR;
+            return res;
+        }
+        // Note that, to make the prepare/teardown case work, we must not call
+        // mBufferManager.clear(), as the stream is still in a usable state after this call.
+        mUseBufferManager = false;
+    }
+
     mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
                                            : STATE_CONSTRUCTED;
     return OK;
@@ -437,6 +553,86 @@
     return (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) != 0;
 }
 
+status_t Camera3OutputStream::setBufferManager(sp<Camera3BufferManager> bufferManager) {
+    Mutex::Autolock l(mLock);
+    if (mState != STATE_CONSTRUCTED) {
+        ALOGE("%s: this method can only be called when stream in in CONSTRUCTED state.",
+                __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    mBufferManager = bufferManager;
+
+    return OK;
+}
+
+void Camera3OutputStream::BufferReleasedListener::onBufferReleased() {
+    sp<Camera3OutputStream> stream = mParent.promote();
+    if (stream == nullptr) {
+        ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
+        return;
+    }
+
+    Mutex::Autolock l(stream->mLock);
+    if (!(stream->mUseBufferManager)) {
+        return;
+    }
+
+    ALOGV("Stream %d: Buffer released", stream->getId());
+    status_t res = stream->mBufferManager->onBufferReleased(
+        stream->getId(), stream->getStreamSetId());
+    if (res != OK) {
+        ALOGE("%s: signaling buffer release to buffer manager failed: %s (%d).", __FUNCTION__,
+                strerror(-res), res);
+        stream->mState = STATE_ERROR;
+    }
+}
+
+status_t Camera3OutputStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
+    Mutex::Autolock l(mLock);
+
+    ALOGV("Stream %d: detachBuffer", getId());
+    if (buffer == nullptr) {
+        return BAD_VALUE;
+    }
+
+    sp<Fence> fence;
+    status_t res = mConsumer->detachNextBuffer(buffer, &fence);
+    if (res == NO_MEMORY) {
+        // This may rarely happen; it indicates that the released buffer was freed by another
+        // call (e.g., attachBuffer, dequeueBuffer) before reaching here. We should notify the
+        // buffer manager that this buffer has been freed. It's not fatal, but should be
+        // avoided, so log a warning.
+        *buffer = 0;
+        ALOGW("%s: the released buffer has already been freed by the buffer queue!", __FUNCTION__);
+    } else if (res != OK) {
+        // Treat other errors as abandonment
+        ALOGE("%s: detach next buffer failed: %s (%d).", __FUNCTION__, strerror(-res), res);
+        mState = STATE_ABANDONED;
+        return res;
+    }
+
+    if (fenceFd != nullptr) {
+        if (fence != 0 && fence->isValid()) {
+            *fenceFd = fence->dup();
+        } else {
+            *fenceFd = -1;
+        }
+    }
+
+    return OK;
+}
+
+bool Camera3OutputStream::isConsumedByHWComposer() const {
+    uint32_t usage = 0;
+    status_t res = getEndpointUsage(&usage);
+    if (res != OK) {
+        ALOGE("%s: getting end point usage failed: %s (%d).", __FUNCTION__, strerror(-res), res);
+        return false;
+    }
+
+    return (usage & GRALLOC_USAGE_HW_COMPOSER) != 0;
+}
+
 }; // namespace camera3
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 3c083ec..7d28b05 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -18,16 +18,54 @@
 #define ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_H
 
 #include <utils/RefBase.h>
+#include <gui/IProducerListener.h>
 #include <gui/Surface.h>
 
 #include "Camera3Stream.h"
 #include "Camera3IOStreamBase.h"
 #include "Camera3OutputStreamInterface.h"
+#include "Camera3BufferManager.h"
 
 namespace android {
 
 namespace camera3 {
 
+class Camera3BufferManager;
+
+/**
+ * Stream info structure that holds the necessary stream info for buffer manager to use for
+ * buffer allocation and management.
+ */
+struct StreamInfo {
+    int streamId;
+    int streamSetId;
+    uint32_t width;
+    uint32_t height;
+    uint32_t format;
+    android_dataspace dataSpace;
+    uint32_t combinedUsage;
+    size_t totalBufferCount;
+    bool isConfigured;
+    StreamInfo(int id = CAMERA3_STREAM_ID_INVALID,
+            int setId = CAMERA3_STREAM_SET_ID_INVALID,
+            uint32_t w = 0,
+            uint32_t h = 0,
+            uint32_t fmt = 0,
+            android_dataspace ds = HAL_DATASPACE_UNKNOWN,
+            uint32_t usage = 0,
+            size_t bufferCount = 0,
+            bool configured = false) :
+                streamId(id),
+                streamSetId(setId),
+                width(w),
+                height(h),
+                format(fmt),
+                dataSpace(ds),
+                combinedUsage(usage),
+                totalBufferCount(bufferCount),
+                isConfigured(configured) {}
+};
+
 /**
  * A class for managing a single stream of output data from the camera device.
  */
@@ -37,18 +75,24 @@
   public:
     /**
      * Set up a stream for formats that have 2 dimensions, such as RAW and YUV.
+     * A valid stream set id needs to be set to support buffer sharing between multiple
+     * streams.
      */
     Camera3OutputStream(int id, sp<Surface> consumer,
             uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+            nsecs_t timestampOffset, int setId = CAMERA3_STREAM_SET_ID_INVALID);
 
     /**
      * Set up a stream for formats that have a variable buffer size for the same
      * dimensions, such as compressed JPEG.
+     * A valid stream set id needs to be set to support buffer sharing between multiple
+     * streams.
      */
     Camera3OutputStream(int id, sp<Surface> consumer,
             uint32_t width, uint32_t height, size_t maxSize, int format,
-            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+            nsecs_t timestampOffset, int setId = CAMERA3_STREAM_SET_ID_INVALID);
 
     virtual ~Camera3OutputStream();
 
@@ -68,11 +112,39 @@
      * Return if this output stream is for video encoding.
      */
     bool isVideoStream() const;
+    /**
+     * Return if this output stream is consumed by hardware composer.
+     */
+    bool isConsumedByHWComposer() const;
+
+    class BufferReleasedListener : public BnProducerListener {
+        public:
+          BufferReleasedListener(wp<Camera3OutputStream> parent) : mParent(parent) {}
+
+          /**
+          * Implementation of IProducerListener, used to notify this stream that the consumer
+          * has returned a buffer and it is ready to return to Camera3BufferManager for reuse.
+          */
+          virtual void onBufferReleased();
+
+        private:
+          wp<Camera3OutputStream> mParent;
+    };
+
+    virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
+
+    /**
+     * Set the graphic buffer manager to get/return the stream buffers.
+     *
+     * It is only legal to call this method when stream is in STATE_CONSTRUCTED state.
+     */
+    status_t setBufferManager(sp<Camera3BufferManager> bufferManager);
 
   protected:
     Camera3OutputStream(int id, camera3_stream_type_t type,
             uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+            int setId = CAMERA3_STREAM_SET_ID_INVALID);
 
     /**
      * Note that we release the lock briefly in this function
@@ -97,6 +169,31 @@
     // Name of Surface consumer
     String8           mConsumerName;
 
+    // Whether the consumer assumes MONOTONIC timestamps
+    bool mUseMonoTimestamp;
+
+    /**
+     * GraphicBuffer manager this stream is registered to. Used to replace the buffer
+     * allocation/deallocation role of BufferQueue.
+     */
+    sp<Camera3BufferManager> mBufferManager;
+
+    /**
+     * Buffer released listener, used to notify the buffer manager that a buffer is released
+     * from consumer side.
+     */
+    sp<BufferReleasedListener> mBufferReleasedListener;
+
+    /**
+     * Flag indicating if the buffer manager is used to allocate the stream buffers
+     */
+    bool mUseBufferManager;
+
+    /**
+     * Timestamp offset for video and hardware composer consumed streams
+     */
+    nsecs_t mTimestampOffset;
+
     /**
      * Internal Camera3Stream interface
      */
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index df89b34..50dce55 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -39,6 +39,17 @@
      * Return if this output stream is for video encoding.
      */
     virtual bool isVideoStream() const = 0;
+
+    /**
+     * Detach an unused buffer from the stream.
+     *
+     * buffer must be non-null; fenceFd may be null. If fenceFd is non-null but
+     * there is no valid fence associated with the detached buffer, it will be
+     * set to -1.
+     *
+     */
+    virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) = 0;
+
 };
 
 } // namespace camera3
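
A hypothetical caller sketch for the detachBuffer() contract documented above;
"stream" stands in for any Camera3OutputStreamInterface, and the fence handling
follows the -1 convention from the comment:

    sp<GraphicBuffer> buffer;
    int fenceFd = -1;
    status_t res = stream->detachBuffer(&buffer, &fenceFd);
    if (res == OK && buffer != nullptr) {
        if (fenceFd != -1) {
            // Wrap the fd in a Fence (which takes ownership) and wait for the
            // producer side to finish with the buffer before reusing it.
            sp<Fence> fence = new Fence(fenceFd);
            fence->waitForever("detachBuffer example");
        }
        // The buffer can now be attached to another stream's queue.
    }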
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 96299b3..96d62d4 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -47,13 +47,19 @@
 Camera3Stream::Camera3Stream(int id,
         camera3_stream_type type,
         uint32_t width, uint32_t height, size_t maxSize, int format,
-        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation, int setId) :
     camera3_stream(),
     mId(id),
+    mSetId(setId),
     mName(String8::format("Camera3Stream[%d]", id)),
     mMaxSize(maxSize),
     mState(STATE_CONSTRUCTED),
     mStatusId(StatusTracker::NO_STATUS_ID),
+    mStreamUnpreparable(false),
+    mOldUsage(0),
+    mOldMaxBuffers(0),
+    mPrepared(false),
+    mPreparedBufferIdx(0),
     mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX) {
 
     camera3_stream::stream_type = type;
@@ -66,8 +72,9 @@
     camera3_stream::max_buffers = 0;
     camera3_stream::priv = NULL;
 
-    if (format == HAL_PIXEL_FORMAT_BLOB && maxSize == 0) {
-        ALOGE("%s: BLOB format with size == 0", __FUNCTION__);
+    if ((format == HAL_PIXEL_FORMAT_BLOB || format == HAL_PIXEL_FORMAT_RAW_OPAQUE) &&
+            maxSize == 0) {
+        ALOGE("%s: BLOB or RAW_OPAQUE format with size == 0", __FUNCTION__);
         mState = STATE_ERROR;
     }
 }
@@ -76,6 +83,10 @@
     return mId;
 }
 
+int Camera3Stream::getStreamSetId() const {
+    return mSetId;
+}
+
 uint32_t Camera3Stream::getWidth() const {
     return camera3_stream::width;
 }
@@ -107,7 +118,7 @@
         case STATE_IN_CONFIG:
         case STATE_IN_RECONFIG:
             // Can start config again with no trouble; but don't redo
-            // oldUsage/oldMaxBuffers
+            // mOldUsage/mOldMaxBuffers
             return this;
         case STATE_CONFIGURED:
             if (hasOutstandingBuffersLocked()) {
@@ -121,8 +132,8 @@
             return NULL;
     }
 
-    oldUsage = camera3_stream::usage;
-    oldMaxBuffers = camera3_stream::max_buffers;
+    mOldUsage = camera3_stream::usage;
+    mOldMaxBuffers = camera3_stream::max_buffers;
 
     res = getEndpointUsage(&(camera3_stream::usage));
     if (res != OK) {
@@ -185,8 +196,8 @@
     // Check if the stream configuration is unchanged, and skip reallocation if
     // so. As documented in hardware/camera3.h:configure_streams().
     if (mState == STATE_IN_RECONFIG &&
-            oldUsage == camera3_stream::usage &&
-            oldMaxBuffers == camera3_stream::max_buffers) {
+            mOldUsage == camera3_stream::usage &&
+            mOldMaxBuffers == camera3_stream::max_buffers) {
         mState = STATE_CONFIGURED;
         return OK;
     }
@@ -239,8 +250,8 @@
             return INVALID_OPERATION;
     }
 
-    camera3_stream::usage = oldUsage;
-    camera3_stream::max_buffers = oldMaxBuffers;
+    camera3_stream::usage = mOldUsage;
+    camera3_stream::max_buffers = mOldMaxBuffers;
 
     mState = (mState == STATE_IN_RECONFIG) ? STATE_CONFIGURED : STATE_CONSTRUCTED;
     return OK;
@@ -257,7 +268,6 @@
     ATRACE_CALL();
 
     Mutex::Autolock l(mLock);
-    status_t res = OK;
 
     if (maxCount < 0) {
         ALOGE("%s: Stream %d: Can't prepare stream if max buffer count (%d) is < 0",
@@ -313,6 +323,11 @@
     return mState == STATE_PREPARING;
 }
 
+bool Camera3Stream::isAbandoned() const {
+    Mutex::Autolock l(mLock);
+    return mState == STATE_ABANDONED;
+}
+
 status_t Camera3Stream::prepareNextBuffer() {
     ATRACE_CALL();
 
@@ -329,7 +344,7 @@
     // Get next buffer - this may allocate, and take a while for large buffers
     res = getBufferLocked( &mPreparedBuffers.editItemAt(mPreparedBufferIdx) );
     if (res != OK) {
-        ALOGE("%s: Stream %d: Unable to allocate buffer %d during preparation",
+        ALOGE("%s: Stream %d: Unable to allocate buffer %zu during preparation",
                 __FUNCTION__, mId, mPreparedBufferIdx);
         return NO_INIT;
     }
@@ -466,16 +481,51 @@
     res = getBufferLocked(buffer);
     if (res == OK) {
         fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
+        if (buffer->buffer) {
+            mOutstandingBuffers.push_back(*buffer->buffer);
+        }
     }
 
     return res;
 }
 
+bool Camera3Stream::isOutstandingBuffer(const camera3_stream_buffer &buffer) {
+    if (buffer.buffer == nullptr) {
+        return false;
+    }
+
+    for (auto b : mOutstandingBuffers) {
+        if (b == *buffer.buffer) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void Camera3Stream::removeOutstandingBuffer(const camera3_stream_buffer &buffer) {
+    if (buffer.buffer == nullptr) {
+        return;
+    }
+
+    for (auto b = mOutstandingBuffers.begin(); b != mOutstandingBuffers.end(); b++) {
+        if (*b == *buffer.buffer) {
+            mOutstandingBuffers.erase(b);
+            return;
+        }
+    }
+}
+
 status_t Camera3Stream::returnBuffer(const camera3_stream_buffer &buffer,
         nsecs_t timestamp) {
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
 
+    // Check if this buffer is outstanding.
+    if (!isOutstandingBuffer(buffer)) {
+        ALOGE("%s: Stream %d: Returning an unknown buffer.", __FUNCTION__, mId);
+        return BAD_VALUE;
+    }
+
     /**
      * TODO: Check that the state is valid first.
      *
@@ -493,6 +543,7 @@
     // buffer to be returned.
     mOutputBufferReturnedSignal.signal();
 
+    removeOutstandingBuffer(buffer);
     return res;
 }
 
@@ -525,6 +576,9 @@
     res = getInputBufferLocked(buffer);
     if (res == OK) {
         fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/false);
+        if (buffer->buffer) {
+            mOutstandingBuffers.push_back(*buffer->buffer);
+        }
     }
 
     return res;
@@ -534,11 +588,19 @@
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
 
+    // Check if this buffer is outstanding.
+    if (!isOutstandingBuffer(buffer)) {
+        ALOGE("%s: Stream %d: Returning an unknown buffer.", __FUNCTION__, mId);
+        return BAD_VALUE;
+    }
+
     status_t res = returnInputBufferLocked(buffer);
     if (res == OK) {
         fireBufferListenersLocked(buffer, /*acquired*/false, /*output*/false);
         mInputBufferReturnedSignal.signal();
     }
+
+    removeOutstandingBuffer(buffer);
     return res;
 }
 
@@ -550,7 +612,7 @@
 }
 
 void Camera3Stream::fireBufferListenersLocked(
-        const camera3_stream_buffer& /*buffer*/, bool acquired, bool output) {
+        const camera3_stream_buffer& buffer, bool acquired, bool output) {
     List<wp<Camera3StreamBufferListener> >::iterator it, end;
 
     // TODO: finish implementing
@@ -558,6 +620,7 @@
     Camera3StreamBufferListener::BufferInfo info =
         Camera3StreamBufferListener::BufferInfo();
     info.mOutput = output;
+    info.mError = (buffer.status == CAMERA3_BUFFER_STATUS_ERROR);
     // TODO: rest of fields
 
     for (it = mBufferListenerList.begin(), end = mBufferListenerList.end();
@@ -708,7 +771,7 @@
     ALOGE("%s: This type of stream does not support input", __FUNCTION__);
     return INVALID_OPERATION;
 }
-status_t Camera3Stream::getInputBufferProducerLocked(sp<IGraphicBufferProducer> *producer) {
+status_t Camera3Stream::getInputBufferProducerLocked(sp<IGraphicBufferProducer>*) {
     ALOGE("%s: This type of stream does not support input", __FUNCTION__);
     return INVALID_OPERATION;
 }
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 753280b..0755700 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -95,6 +95,8 @@
  *    STATE_PREPARING      => STATE_CONFIGURED:
  *        When sufficient prepareNextBuffer calls have been made to allocate
  *        all stream buffers, or cancelPrepare is called.
+ *    STATE_CONFIGURED     => STATE_ABANDONED:
+ *        When the buffer queue of the stream is abandoned.
  *
  * Status Tracking:
  *    Each stream is tracked by StatusTracker as a separate component,
@@ -130,6 +132,11 @@
     int              getId() const;
 
     /**
+     * Get the output stream set id.
+     */
+    int              getStreamSetId() const;
+
+    /**
      * Get the stream's dimensions and format
      */
     uint32_t          getWidth() const;
@@ -348,8 +355,28 @@
     void             removeBufferListener(
             const sp<Camera3StreamBufferListener>& listener);
 
+    /**
+     * Return if the buffer queue of the stream is abandoned.
+     */
+    bool             isAbandoned() const;
+
   protected:
     const int mId;
+    /**
+     * Stream set id, used to indicate which group this stream belongs to for buffer sharing
+     * across multiple streams.
+     *
+     * The default value is set to CAMERA3_STREAM_SET_ID_INVALID, which indicates that this stream
+     * doesn't intend to share buffers with any other streams, and this stream will fall back to
+     * the existing BufferQueue mechanism to manage the buffer allocations and buffer circulation.
+     * When a valid stream set id is set, this stream intends to use the Camera3BufferManager to
+     * manage the buffer allocations; the BufferQueue will only handle the buffer transaction
+     * between the producer and consumer. In this case, upon successful registration, the streams
+     * with the same stream set id will potentially share the buffers allocated by
+     * Camera3BufferManager.
+     */
+    const int mSetId;
+
     const String8 mName;
     // Zero for formats with fixed buffer size for given dimensions.
     const size_t mMaxSize;
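For context, a sketch of how a derived stream would forward the new set id (a hypothetical constructor shape; the real Camera3OutputStream signature may differ):

    // Hypothetical: a derived class passes setId through to Camera3Stream.
    // CAMERA3_STREAM_SET_ID_INVALID opts the stream out of shared allocation.
    Camera3OutputStream::Camera3OutputStream(int id, uint32_t width, uint32_t height,
            int format, android_dataspace dataSpace,
            camera3_stream_rotation_t rotation, int setId)
        : Camera3Stream(id, CAMERA3_STREAM_OUTPUT, width, height,
                        /*maxSize*/0, format, dataSpace, rotation, setId) {
        // Streams registered with the same valid setId may share buffers
        // through Camera3BufferManager.
    }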
@@ -360,14 +387,16 @@
         STATE_IN_CONFIG,
         STATE_IN_RECONFIG,
         STATE_CONFIGURED,
-        STATE_PREPARING
+        STATE_PREPARING,
+        STATE_ABANDONED
     } mState;
 
     mutable Mutex mLock;
 
     Camera3Stream(int id, camera3_stream_type type,
             uint32_t width, uint32_t height, size_t maxSize, int format,
-            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+            int setId);
 
     /**
      * Interface to be implemented by derived classes
@@ -421,8 +450,8 @@
     bool mStreamUnpreparable;
 
   private:
-    uint32_t oldUsage;
-    uint32_t oldMaxBuffers;
+    uint32_t mOldUsage;
+    uint32_t mOldMaxBuffers;
     Condition mOutputBufferReturnedSignal;
     Condition mInputBufferReturnedSignal;
     static const nsecs_t kWaitForBufferDuration = 3000000000LL; // 3000 ms
@@ -436,6 +465,12 @@
 
     status_t        cancelPrepareLocked();
 
+    // Return whether the buffer is in the list of outstanding buffers.
+    bool isOutstandingBuffer(const camera3_stream_buffer& buffer);
+
+    // Remove the buffer from the list of outstanding buffers.
+    void removeOutstandingBuffer(const camera3_stream_buffer& buffer);
+
     // Tracking for PREPARING state
 
     // State of buffer preallocation. Only true if either prepareNextBuffer
@@ -447,7 +482,10 @@
     size_t mPreparedBufferIdx;
 
     // Number of buffers allocated on last prepare call.
-    int mLastMaxCount;
+    size_t mLastMaxCount;
+
+    // Outstanding buffers dequeued from the stream's buffer queue.
+    List<buffer_handle_t> mOutstandingBuffers;
 
 }; // class Camera3Stream
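A sketch of the two private helpers, assuming buffer_handle_t comparison is enough to identify a dequeued buffer (the in-tree implementation may differ):

    bool Camera3Stream::isOutstandingBuffer(const camera3_stream_buffer& buffer) {
        if (buffer.buffer == nullptr) {
            return false;
        }
        for (auto it = mOutstandingBuffers.begin(); it != mOutstandingBuffers.end(); it++) {
            if (*it == *buffer.buffer) {
                return true;
            }
        }
        return false;
    }

    void Camera3Stream::removeOutstandingBuffer(const camera3_stream_buffer& buffer) {
        for (auto it = mOutstandingBuffers.begin(); it != mOutstandingBuffers.end(); it++) {
            if (*it == *buffer.buffer) {
                mOutstandingBuffers.erase(it);
                return;
            }
        }
    }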
 
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
index 62ea6c0..2db333d 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
@@ -34,6 +34,7 @@
         uint32_t mScalingMode;
         int64_t mTimestamp;
         uint64_t mFrameNumber;
+        bool mError;
     };
 
     // Buffer was acquired by the HAL
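With the new mError flag, a listener can tell error returns apart from normal ones. A sketch, assuming the interface exposes onBufferAcquired/onBufferReleased callbacks taking this BufferInfo:

    class DroppedBufferCounter : public Camera3StreamBufferListener {
      public:
        virtual void onBufferAcquired(const BufferInfo& /*bufferInfo*/) {}
        virtual void onBufferReleased(const BufferInfo& bufferInfo) {
            if (bufferInfo.mError) {
                // Buffer came back with CAMERA3_BUFFER_STATUS_ERROR.
                mDropped++;
            }
        }
      private:
        int mDropped = 0;
    };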
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 54009ae..6cb7a54 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -26,6 +26,20 @@
 
 namespace camera3 {
 
+enum {
+    /**
+     * This stream set ID indicates that the set ID is invalid, and this stream doesn't intend to
+     * share buffers with any other stream. It is illegal to register this kind of stream to
+     * Camera3BufferManager.
+     */
+    CAMERA3_STREAM_SET_ID_INVALID = -1,
+
+    /**
+     * Invalid output stream ID.
+     */
+    CAMERA3_STREAM_ID_INVALID = -1,
+};
+
 class StatusTracker;
 
 /**
@@ -45,6 +59,11 @@
     virtual int      getId() const = 0;
 
     /**
+     * Get the output stream set id.
+     */
+    virtual int      getStreamSetId() const = 0;
+
+    /**
      * Get the stream's dimensions and format
      */
     virtual uint32_t getWidth() const = 0;
@@ -243,6 +262,11 @@
     virtual status_t disconnect() = 0;
 
     /**
+     * Return whether the buffer queue of the stream is abandoned.
+     */
+    virtual bool isAbandoned() const = 0;
+
+    /**
      * Debug dump of the stream's state.
      */
     virtual void     dump(int fd, const Vector<String16> &args) const = 0;
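A trivial illustration of how these constants are meant to be consulted (a hypothetical helper, not part of this change):

    // Only streams with a valid set id should be registered with
    // Camera3BufferManager; CAMERA3_STREAM_SET_ID_INVALID means "do not share".
    static bool shouldRegisterWithBufferManager(const Camera3StreamInterface& stream) {
        return stream.getStreamSetId() != CAMERA3_STREAM_SET_ID_INVALID;
    }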
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index eefcb44..7414c4c 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -115,8 +115,7 @@
         Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL,
                             width, height,
                             HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
-                            HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0),
-        mDepth(bufferCount) {
+                            HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0) {
 
     sp<IGraphicBufferProducer> producer;
     sp<IGraphicBufferConsumer> consumer;
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h
index 5323a49..12369cf 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.h
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.h
@@ -73,7 +73,6 @@
 
   private:
 
-    int mDepth;
     // Input buffers pending to be queued into HAL
     List<sp<RingBufferConsumer::PinnedBufferItem> > mInputBufferQueue;
     sp<RingBufferConsumer>                          mProducer;
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index 8cd6800..3d54460 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -229,7 +229,7 @@
 
         // item.mGraphicBuffer was populated with the proper graphic-buffer
         // at acquire even if it was previously acquired
-        err = addReleaseFenceLocked(item.mBuf,
+        err = addReleaseFenceLocked(item.mSlot,
                 item.mGraphicBuffer, item.mFence);
 
         if (err != OK) {
@@ -244,7 +244,7 @@
 
         // item.mGraphicBuffer was populated with the proper graphic-buffer
         // at acquire even if it was previously acquired
-        err = releaseBufferLocked(item.mBuf, item.mGraphicBuffer,
+        err = releaseBufferLocked(item.mSlot, item.mGraphicBuffer,
                                   EGL_NO_DISPLAY,
                                   EGL_NO_SYNC_KHR);
         if (err != OK) {
@@ -256,9 +256,7 @@
         BI_LOGV("Buffer timestamp %" PRId64 ", frame %" PRIu64 " evicted",
                 item.mTimestamp, item.mFrameNumber);
 
-        size_t currentSize = mBufferItemList.size();
         mBufferItemList.erase(accIt);
-        assert(mBufferItemList.size() == currentSize - 1);
     } else {
         BI_LOGW("All buffers pinned, could not find any to release");
         return NO_BUFFER_AVAILABLE;
@@ -318,7 +316,7 @@
 
         mLatestTimestamp = item.mTimestamp;
 
-        item.mGraphicBuffer = mSlots[item.mBuf].mGraphicBuffer;
+        item.mGraphicBuffer = mSlots[item.mSlot].mGraphicBuffer;
     } // end of mMutex lock
 
     ConsumerBase::onFrameAvailable(item);
@@ -335,7 +333,7 @@
 
         RingBufferItem& find = *it;
         if (item.mGraphicBuffer == find.mGraphicBuffer) {
-            status_t res = addReleaseFenceLocked(item.mBuf,
+            status_t res = addReleaseFenceLocked(item.mSlot,
                     item.mGraphicBuffer, item.mFence);
 
             if (res != OK) {
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h
index 83e7298..28dc5d5 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.h
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h
@@ -133,7 +133,7 @@
         }
 
         bool isEmpty() {
-            return mBufferItem.mBuf == BufferQueue::INVALID_BUFFER_SLOT;
+            return mBufferItem.mSlot == BufferQueue::INVALID_BUFFER_SLOT;
         }
 
         BufferItem& getBufferItem() { return mBufferItem; }
@@ -189,4 +189,4 @@
 
 } // namespace android
 
-#endif // ANDROID_GUI_CPUCONSUMER_H
+#endif // ANDROID_GUI_RINGBUFFERCONSUMER_H
diff --git a/services/camera/libcameraservice/utils/AutoConditionLock.cpp b/services/camera/libcameraservice/utils/AutoConditionLock.cpp
index c8ee965..ed80a95 100644
--- a/services/camera/libcameraservice/utils/AutoConditionLock.cpp
+++ b/services/camera/libcameraservice/utils/AutoConditionLock.cpp
@@ -24,13 +24,15 @@
 
 // Locks manager-owned mutex
 AutoConditionLock::AutoConditionLock(const std::shared_ptr<WaitableMutexWrapper>& manager) :
-        mManager{manager}, mAutoLock{manager->mMutex} {}
+        mManager{manager}, mAutoLock{manager->mMutex}, mAcquired(false) {}
 
 // Unlocks manager-owned mutex
 AutoConditionLock::~AutoConditionLock() {
     // Unset the condition and wake everyone up before releasing lock
-    mManager->mState = false;
-    mManager->mCondition.broadcast();
+    if (mAcquired) {
+        mManager->mState = false;
+        mManager->mCondition.broadcast();
+    }
 }
 
 std::unique_ptr<AutoConditionLock> AutoConditionLock::waitAndAcquire(
@@ -59,6 +61,7 @@
 
     // Set the condition and return
     manager->mState = true;
+    scopedLock->mAcquired = true;
     return scopedLock;
 }
 
@@ -84,6 +87,7 @@
 
     // Set the condition and return
     manager->mState = true;
+    scopedLock->mAcquired = true;
     return scopedLock;
 }
 
diff --git a/services/camera/libcameraservice/utils/AutoConditionLock.h b/services/camera/libcameraservice/utils/AutoConditionLock.h
index 9a3eafc..b7f167b 100644
--- a/services/camera/libcameraservice/utils/AutoConditionLock.h
+++ b/services/camera/libcameraservice/utils/AutoConditionLock.h
@@ -92,6 +92,7 @@
 
     std::shared_ptr<WaitableMutexWrapper> mManager;
     Mutex::Autolock mAutoLock;
+    bool mAcquired;
 };
 
 }; // namespace android
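The effect of mAcquired, in a usage sketch (the WaitableMutexWrapper constructor and the timeout overload are assumptions based on the surrounding code):

    Mutex mutex;
    std::shared_ptr<WaitableMutexWrapper> manager =
            std::make_shared<WaitableMutexWrapper>(&mutex);  // assumed ctor

    // On timeout this returns nullptr; the temporary AutoConditionLock destroyed
    // inside waitAndAcquire now leaves mState alone and does not broadcast,
    // because mAcquired was never set.
    std::unique_ptr<AutoConditionLock> lock =
            AutoConditionLock::waitAndAcquire(manager, /*waitTime*/ 1000000000LL);
    if (lock == nullptr) {
        // Lock was not acquired; the condition is untouched.
    }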
diff --git a/services/camera/libcameraservice/utils/CameraTraces.h b/services/camera/libcameraservice/utils/CameraTraces.h
index d10dbc9..13ca16d 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.h
+++ b/services/camera/libcameraservice/utils/CameraTraces.h
@@ -24,7 +24,7 @@
 namespace android {
 namespace camera3 {
 
-class CameraTracesImpl;
+struct CameraTracesImpl;
 
 // Collect a list of the process's stack traces
 class CameraTraces {
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
new file mode 100644
index 0000000..a5f0751
--- /dev/null
+++ b/services/mediacodec/Android.mk
@@ -0,0 +1,30 @@
+LOCAL_PATH := $(call my-dir)
+
+# service library
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := MediaCodecService.cpp
+LOCAL_SHARED_LIBRARIES := libmedia libbinder libutils liblog libstagefright_omx
+LOCAL_C_INCLUDES := \
+    $(TOP)/frameworks/av/media/libstagefright \
+    $(TOP)/frameworks/native/include/media/openmax
+LOCAL_MODULE:= libmediacodecservice
+LOCAL_32_BIT_ONLY := true
+include $(BUILD_SHARED_LIBRARY)
+
+
+# service executable
+include $(CLEAR_VARS)
+LOCAL_REQUIRED_MODULES_arm := mediacodec-seccomp.policy
+LOCAL_SRC_FILES := main_codecservice.cpp minijail/minijail.cpp
+LOCAL_SHARED_LIBRARIES := libmedia libmediacodecservice libbinder libutils \
+	liblog libminijail
+LOCAL_C_INCLUDES := \
+    $(TOP)/frameworks/av/media/libstagefright \
+    $(TOP)/frameworks/native/include/media/openmax
+LOCAL_MODULE:= mediacodec
+LOCAL_32_BIT_ONLY := true
+LOCAL_INIT_RC := mediacodec.rc
+include $(BUILD_EXECUTABLE)
+
+include $(call all-makefiles-under, $(LOCAL_PATH))
+
diff --git a/services/mediacodec/MODULE_LICENSE_APACHE2 b/services/mediacodec/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/services/mediacodec/MODULE_LICENSE_APACHE2
diff --git a/services/mediacodec/MediaCodecService.cpp b/services/mediacodec/MediaCodecService.cpp
new file mode 100644
index 0000000..fc1e5d9
--- /dev/null
+++ b/services/mediacodec/MediaCodecService.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaCodecService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "MediaCodecService.h"
+
+namespace android {
+
+sp<IOMX> MediaCodecService::getOMX() {
+
+    Mutex::Autolock autoLock(mLock);
+
+    if (mOMX.get() == NULL) {
+        mOMX = new OMX;
+    }
+
+    return mOMX;
+}
+
+
+status_t MediaCodecService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+        uint32_t flags)
+{
+    return BnMediaCodecService::onTransact(code, data, reply, flags);
+}
+
+}   // namespace android
diff --git a/services/mediacodec/MediaCodecService.h b/services/mediacodec/MediaCodecService.h
new file mode 100644
index 0000000..d64debb
--- /dev/null
+++ b/services/mediacodec/MediaCodecService.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_CODEC_SERVICE_H
+#define ANDROID_MEDIA_CODEC_SERVICE_H
+
+#include <binder/BinderService.h>
+#include <media/IMediaCodecService.h>
+#include <include/OMX.h>
+
+namespace android {
+
+class MediaCodecService : public BinderService<MediaCodecService>, public BnMediaCodecService
+{
+    friend class BinderService<MediaCodecService>;    // for MediaCodecService()
+public:
+    MediaCodecService() : BnMediaCodecService() { }
+    virtual ~MediaCodecService() { }
+    virtual void onFirstRef() { }
+
+    static const char*  getServiceName() { return "media.codec"; }
+
+    virtual sp<IOMX>    getOMX();
+
+    virtual status_t    onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+                                uint32_t flags);
+
+private:
+    Mutex               mLock;
+    sp<IOMX>            mOMX;
+};
+
+}   // namespace android
+
+#endif  // ANDROID_MEDIA_CODEC_SERVICE_H
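A client-side sketch of reaching the new service (assumes <binder/IServiceManager.h> and the IMediaCodecService interface from libmedia):

    sp<IOMX> getCodecServiceOMX() {
        sp<IBinder> binder =
                defaultServiceManager()->getService(String16("media.codec"));
        sp<IMediaCodecService> service =
                interface_cast<IMediaCodecService>(binder);
        if (service == NULL) {
            return NULL;
        }
        return service->getOMX();  // lazily instantiates OMX in the codec process
    }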
diff --git a/services/mediacodec/NOTICE b/services/mediacodec/NOTICE
new file mode 100644
index 0000000..34bdaf1
--- /dev/null
+++ b/services/mediacodec/NOTICE
@@ -0,0 +1,190 @@
+
+   Copyright (c) 2005-2015, The Android Open Source Project
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
new file mode 100644
index 0000000..a2868c1
--- /dev/null
+++ b/services/mediacodec/main_codecservice.cpp
@@ -0,0 +1,47 @@
+/*
+**
+** Copyright 2016, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "mediacodec"
+//#define LOG_NDEBUG 0
+
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <binder/IPCThreadState.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+
+// from LOCAL_C_INCLUDES
+#include "MediaCodecService.h"
+#include "minijail/minijail.h"
+
+using namespace android;
+
+int main(int argc __unused, char** argv)
+{
+    ALOGI("@@@ mediacodecservice starting");
+    signal(SIGPIPE, SIG_IGN);
+    MiniJail();
+
+    strcpy(argv[0], "media.codec");
+    sp<ProcessState> proc(ProcessState::self());
+    sp<IServiceManager> sm = defaultServiceManager();
+    MediaCodecService::instantiate();
+    ProcessState::self()->startThreadPool();
+    IPCThreadState::self()->joinThreadPool();
+}
diff --git a/services/mediacodec/mediacodec.rc b/services/mediacodec/mediacodec.rc
new file mode 100644
index 0000000..d78e0a4
--- /dev/null
+++ b/services/mediacodec/mediacodec.rc
@@ -0,0 +1,6 @@
+service mediacodec /system/bin/mediacodec
+    class main
+    user mediacodec
+    group camera drmrpc mediadrm
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediacodec/minijail/Android.mk b/services/mediacodec/minijail/Android.mk
new file mode 100644
index 0000000..d2becb4
--- /dev/null
+++ b/services/mediacodec/minijail/Android.mk
@@ -0,0 +1,28 @@
+LOCAL_PATH := $(call my-dir)
+
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64))
+include $(CLEAR_VARS)
+LOCAL_MODULE := mediacodec-seccomp.policy
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
+
+# mediacodec runs in 32-bit compatibility mode. For 64-bit architectures,
+# use the 32-bit policy.
+ifdef TARGET_2ND_ARCH
+    LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediacodec-seccomp-$(TARGET_2ND_ARCH).policy
+else
+    LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediacodec-seccomp-$(TARGET_ARCH).policy
+endif
+
+# allow device-specific additions to the syscall whitelist
+ifneq (,$(wildcard $(BOARD_SECCOMP_POLICY)/mediacodec-seccomp.policy))
+    LOCAL_SRC_FILES += $(BOARD_SECCOMP_POLICY)/mediacodec-seccomp.policy
+endif
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+$(LOCAL_BUILT_MODULE): $(LOCAL_SRC_FILES)
+	@mkdir -p $(dir $@)
+	$(hide) cat > $@ $^
+
+endif
diff --git a/services/mediacodec/minijail/minijail.cpp b/services/mediacodec/minijail/minijail.cpp
new file mode 100644
index 0000000..72bb1af
--- /dev/null
+++ b/services/mediacodec/minijail/minijail.cpp
@@ -0,0 +1,50 @@
+/*
+**
+** Copyright 2016, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <unistd.h>
+
+#include <cutils/log.h>
+#include <libminijail.h>
+
+#include "minijail.h"
+
+namespace android {
+
+/* Must match location in Android.mk */
+static const char kSeccompFilePath[] = "/system/etc/seccomp_policy/mediacodec-seccomp.policy";
+
+int MiniJail()
+{
+    /* no seccomp policy for this architecture */
+    if (access(kSeccompFilePath, R_OK) == -1) {
+        ALOGW("No seccomp filter defined for this architecture.");
+        return 0;
+    }
+
+    struct minijail *jail = minijail_new();
+    if (jail == NULL) {
+        ALOGW("Failed to create minijail.");
+        return -1;
+    }
+
+    minijail_no_new_privs(jail);
+    minijail_log_seccomp_filter_failures(jail);
+    minijail_use_seccomp_filter(jail);
+    minijail_parse_seccomp_filters(jail, kSeccompFilePath);
+    minijail_enter(jail);
+    minijail_destroy(jail);
+    return 0;
+}
+}
diff --git a/services/mediacodec/minijail/minijail.h b/services/mediacodec/minijail/minijail.h
new file mode 100644
index 0000000..ae01470
--- /dev/null
+++ b/services/mediacodec/minijail/minijail.h
@@ -0,0 +1,20 @@
+/*
+**
+** Copyright 2016, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+namespace android {
+int MiniJail();
+}
diff --git a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
new file mode 100644
index 0000000..0afaa15
--- /dev/null
+++ b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
@@ -0,0 +1,51 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+futex: 1
+ioctl: 1
+write: 1
+prctl: 1
+clock_gettime: 1
+getpriority: 1
+read: 1
+close: 1
+writev: 1
+dup: 1
+ppoll: 1
+mmap2: 1
+munmap: 1
+mprotect: 1
+madvise: 1
+openat: 1
+sigaltstack: 1
+clone: 1
+setpriority: 1
+getuid32: 1
+fstat64: 1
+pread64: 1
+faccessat: 1
+readlinkat: 1
+exit: 1
+rt_sigprocmask: 1
+set_tid_address: 1
+restart_syscall: 1
+exit_group: 1
+rt_sigreturn: 1
+pipe2: 1
+gettimeofday: 1
+sched_yield: 1
+nanosleep: 1
+lseek: 1
+sched_get_priority_max: 1
+sched_get_priority_min: 1
+statfs64: 1
+sched_setscheduler: 1
+fstatat64: 1
+ugetrlimit: 1
+
+# for attaching to debuggerd on process crash
+sigaction: 1
+tgkill: 1
+socket: 1
+connect: 1
+fcntl64: 1
+rt_tgsigqueueinfo: 1
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
new file mode 100644
index 0000000..8baaf13
--- /dev/null
+++ b/services/mediadrm/Android.mk
@@ -0,0 +1,45 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+
+LOCAL_SRC_FILES:= \
+    MediaDrmService.cpp \
+    main_mediadrmserver.cpp
+
+LOCAL_SHARED_LIBRARIES:= \
+    libbinder \
+    libcutils \
+    liblog \
+    libmedia \
+    libmediadrm \
+    libmediaplayerservice \
+    libstagefright \
+    libui \
+    libutils \
+
+LOCAL_C_INCLUDES := \
+    frameworks/av/media/libmediaplayerservice \
+
+LOCAL_CFLAGS += -Wall -Wextra -Werror
+
+LOCAL_MODULE:= mediadrmserver
+LOCAL_32_BIT_ONLY := true
+
+LOCAL_INIT_RC := mediadrmserver.rc
+
+include $(BUILD_EXECUTABLE)
diff --git a/services/mediadrm/MediaDrmService.cpp b/services/mediadrm/MediaDrmService.cpp
new file mode 100644
index 0000000..331c568
--- /dev/null
+++ b/services/mediadrm/MediaDrmService.cpp
@@ -0,0 +1,45 @@
+/*
+**
+** Copyright 2008, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+// Service that vends ICrypto and IDrm instances
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaDrmService"
+
+#include "MediaDrmService.h"
+
+#include <binder/IServiceManager.h>
+#include <media/Crypto.h>
+#include <media/Drm.h>
+#include <utils/Log.h>
+
+namespace android {
+
+void MediaDrmService::instantiate() {
+    defaultServiceManager()->addService(
+            String16("media.drm"), new MediaDrmService());
+}
+
+sp<ICrypto> MediaDrmService::makeCrypto() {
+    return new Crypto;
+}
+
+sp<IDrm> MediaDrmService::makeDrm() {
+    return new Drm;
+}
+
+} // namespace android
diff --git a/services/mediadrm/MediaDrmService.h b/services/mediadrm/MediaDrmService.h
new file mode 100644
index 0000000..ecc2da7
--- /dev/null
+++ b/services/mediadrm/MediaDrmService.h
@@ -0,0 +1,48 @@
+/*
+**
+** Copyright 2008, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_MEDIADRMSERVICE_H
+#define ANDROID_MEDIADRMSERVICE_H
+
+#include <arpa/inet.h>
+
+#include <utils/threads.h>
+
+#include <media/Metadata.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/IMediaDrmService.h>
+
+namespace android {
+
+class MediaDrmService : public BnMediaDrmService
+{
+public:
+    static void instantiate();
+
+    // IMediaDrmService interface
+    virtual sp<ICrypto> makeCrypto();
+    virtual sp<IDrm> makeDrm();
+private:
+    MediaDrmService() {}
+    virtual ~MediaDrmService() {}
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_MEDIADRMSERVICE_H
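A client-side sketch for the DRM service (assumes the IMediaDrmService interface from libmediadrm):

    sp<ICrypto> makeCryptoViaService() {
        sp<IMediaDrmService> service = interface_cast<IMediaDrmService>(
                defaultServiceManager()->getService(String16("media.drm")));
        if (service == NULL) {
            return NULL;
        }
        return service->makeCrypto();
    }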
diff --git a/services/mediadrm/main_mediadrmserver.cpp b/services/mediadrm/main_mediadrmserver.cpp
new file mode 100644
index 0000000..b767b8c
--- /dev/null
+++ b/services/mediadrm/main_mediadrmserver.cpp
@@ -0,0 +1,43 @@
+/*
+**
+** Copyright 2008, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "mediadrmserver"
+//#define LOG_NDEBUG 0
+
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <binder/IPCThreadState.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
+#include <cutils/properties.h>
+#include <utils/Log.h>
+#include "MediaDrmService.h"
+
+using namespace android;
+
+int main()
+{
+    signal(SIGPIPE, SIG_IGN);
+
+    sp<ProcessState> proc(ProcessState::self());
+    sp<IServiceManager> sm = defaultServiceManager();
+    ALOGI("ServiceManager: %p", sm.get());
+    MediaDrmService::instantiate();
+    ProcessState::self()->startThreadPool();
+    IPCThreadState::self()->joinThreadPool();
+}
diff --git a/services/mediadrm/mediadrmserver.rc b/services/mediadrm/mediadrmserver.rc
new file mode 100644
index 0000000..359c2cf
--- /dev/null
+++ b/services/mediadrm/mediadrmserver.rc
@@ -0,0 +1,6 @@
+service mediadrm /system/bin/mediadrmserver
+    class main
+    user media
+    group mediadrm drmrpc
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediadrm/tests/Android.mk b/services/mediadrm/tests/Android.mk
new file mode 100644
index 0000000..8cbf782
--- /dev/null
+++ b/services/mediadrm/tests/Android.mk
@@ -0,0 +1,27 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := DrmSessionManager_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+	DrmSessionManager_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	liblog \
+	libmediaplayerservice \
+	libutils \
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/include \
+	frameworks/av/media/libmediaplayerservice \
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+LOCAL_32_BIT_ONLY := true
+
+include $(BUILD_NATIVE_TEST)
+
diff --git a/services/mediadrm/tests/DrmSessionManager_test.cpp b/services/mediadrm/tests/DrmSessionManager_test.cpp
new file mode 100644
index 0000000..de350a1
--- /dev/null
+++ b/services/mediadrm/tests/DrmSessionManager_test.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DrmSessionManager_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include "Drm.h"
+#include "DrmSessionClientInterface.h"
+#include "DrmSessionManager.h"
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/ProcessInfoInterface.h>
+
+namespace android {
+
+struct FakeProcessInfo : public ProcessInfoInterface {
+    FakeProcessInfo() {}
+    virtual ~FakeProcessInfo() {}
+
+    virtual bool getPriority(int pid, int* priority) {
+        // For testing, use pid as priority.
+        // The lower the value, the higher the priority.
+        *priority = pid;
+        return true;
+    }
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(FakeProcessInfo);
+};
+
+struct FakeDrm : public DrmSessionClientInterface {
+    FakeDrm() {}
+    virtual ~FakeDrm() {}
+
+    virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
+        mReclaimedSessions.push_back(sessionId);
+        return true;
+    }
+
+    const Vector<Vector<uint8_t> >& reclaimedSessions() const {
+        return mReclaimedSessions;
+    }
+
+private:
+    Vector<Vector<uint8_t> > mReclaimedSessions;
+
+    DISALLOW_EVIL_CONSTRUCTORS(FakeDrm);
+};
+
+static const int kTestPid1 = 30;
+static const int kTestPid2 = 20;
+static const uint8_t kTestSessionId1[] = {1, 2, 3};
+static const uint8_t kTestSessionId2[] = {4, 5, 6, 7, 8};
+static const uint8_t kTestSessionId3[] = {9, 0};
+
+class DrmSessionManagerTest : public ::testing::Test {
+public:
+    DrmSessionManagerTest()
+        : mDrmSessionManager(new DrmSessionManager(new FakeProcessInfo())),
+          mTestDrm1(new FakeDrm()),
+          mTestDrm2(new FakeDrm()) {
+        GetSessionId(kTestSessionId1, ARRAY_SIZE(kTestSessionId1), &mSessionId1);
+        GetSessionId(kTestSessionId2, ARRAY_SIZE(kTestSessionId2), &mSessionId2);
+        GetSessionId(kTestSessionId3, ARRAY_SIZE(kTestSessionId3), &mSessionId3);
+    }
+
+protected:
+    static void GetSessionId(const uint8_t* ids, size_t num, Vector<uint8_t>* sessionId) {
+        for (size_t i = 0; i < num; ++i) {
+            sessionId->push_back(ids[i]);
+        }
+    }
+
+    static void ExpectEqSessionInfo(const SessionInfo& info, sp<DrmSessionClientInterface> drm,
+            const Vector<uint8_t>& sessionId, int64_t timeStamp) {
+        EXPECT_EQ(drm, info.drm);
+        EXPECT_TRUE(isEqualSessionId(sessionId, info.sessionId));
+        EXPECT_EQ(timeStamp, info.timeStamp);
+    }
+
+    void addSession() {
+        mDrmSessionManager->addSession(kTestPid1, mTestDrm1, mSessionId1);
+        mDrmSessionManager->addSession(kTestPid2, mTestDrm2, mSessionId2);
+        mDrmSessionManager->addSession(kTestPid2, mTestDrm2, mSessionId3);
+        const PidSessionInfosMap& map = sessionMap();
+        EXPECT_EQ(2u, map.size());
+        ssize_t index1 = map.indexOfKey(kTestPid1);
+        ASSERT_GE(index1, 0);
+        const SessionInfos& infos1 = map[index1];
+        EXPECT_EQ(1u, infos1.size());
+        ExpectEqSessionInfo(infos1[0], mTestDrm1, mSessionId1, 0);
+
+        ssize_t index2 = map.indexOfKey(kTestPid2);
+        ASSERT_GE(index2, 0);
+        const SessionInfos& infos2 = map[index2];
+        EXPECT_EQ(2u, infos2.size());
+        ExpectEqSessionInfo(infos2[0], mTestDrm2, mSessionId2, 1);
+        ExpectEqSessionInfo(infos2[1], mTestDrm2, mSessionId3, 2);
+    }
+
+    const PidSessionInfosMap& sessionMap() {
+        return mDrmSessionManager->mSessionMap;
+    }
+
+    void testGetLowestPriority() {
+        int pid;
+        int priority;
+        EXPECT_FALSE(mDrmSessionManager->getLowestPriority_l(&pid, &priority));
+
+        addSession();
+        EXPECT_TRUE(mDrmSessionManager->getLowestPriority_l(&pid, &priority));
+
+        EXPECT_EQ(kTestPid1, pid);
+        FakeProcessInfo processInfo;
+        int priority1;
+        processInfo.getPriority(kTestPid1, &priority1);
+        EXPECT_EQ(priority1, priority);
+    }
+
+    void testGetLeastUsedSession() {
+        sp<DrmSessionClientInterface> drm;
+        Vector<uint8_t> sessionId;
+        EXPECT_FALSE(mDrmSessionManager->getLeastUsedSession_l(kTestPid1, &drm, &sessionId));
+
+        addSession();
+
+        EXPECT_TRUE(mDrmSessionManager->getLeastUsedSession_l(kTestPid1, &drm, &sessionId));
+        EXPECT_EQ(mTestDrm1, drm);
+        EXPECT_TRUE(isEqualSessionId(mSessionId1, sessionId));
+
+        EXPECT_TRUE(mDrmSessionManager->getLeastUsedSession_l(kTestPid2, &drm, &sessionId));
+        EXPECT_EQ(mTestDrm2, drm);
+        EXPECT_TRUE(isEqualSessionId(mSessionId2, sessionId));
+
+        // mSessionId2 is no longer the least used session.
+        mDrmSessionManager->useSession(mSessionId2);
+        EXPECT_TRUE(mDrmSessionManager->getLeastUsedSession_l(kTestPid2, &drm, &sessionId));
+        EXPECT_EQ(mTestDrm2, drm);
+        EXPECT_TRUE(isEqualSessionId(mSessionId3, sessionId));
+    }
+
+    sp<DrmSessionManager> mDrmSessionManager;
+    sp<FakeDrm> mTestDrm1;
+    sp<FakeDrm> mTestDrm2;
+    Vector<uint8_t> mSessionId1;
+    Vector<uint8_t> mSessionId2;
+    Vector<uint8_t> mSessionId3;
+};
+
+TEST_F(DrmSessionManagerTest, addSession) {
+    addSession();
+}
+
+TEST_F(DrmSessionManagerTest, useSession) {
+    addSession();
+
+    mDrmSessionManager->useSession(mSessionId1);
+    mDrmSessionManager->useSession(mSessionId3);
+
+    const PidSessionInfosMap& map = sessionMap();
+    const SessionInfos& infos1 = map.valueFor(kTestPid1);
+    const SessionInfos& infos2 = map.valueFor(kTestPid2);
+    ExpectEqSessionInfo(infos1[0], mTestDrm1, mSessionId1, 3);
+    ExpectEqSessionInfo(infos2[1], mTestDrm2, mSessionId3, 4);
+}
+
+TEST_F(DrmSessionManagerTest, removeSession) {
+    addSession();
+
+    mDrmSessionManager->removeSession(mSessionId2);
+
+    const PidSessionInfosMap& map = sessionMap();
+    EXPECT_EQ(2u, map.size());
+    const SessionInfos& infos1 = map.valueFor(kTestPid1);
+    const SessionInfos& infos2 = map.valueFor(kTestPid2);
+    EXPECT_EQ(1u, infos1.size());
+    EXPECT_EQ(1u, infos2.size());
+    // mSessionId2 has been removed.
+    ExpectEqSessionInfo(infos2[0], mTestDrm2, mSessionId3, 2);
+}
+
+TEST_F(DrmSessionManagerTest, removeDrm) {
+    addSession();
+
+    sp<FakeDrm> drm = new FakeDrm;
+    const uint8_t ids[] = {123};
+    Vector<uint8_t> sessionId;
+    GetSessionId(ids, ARRAY_SIZE(ids), &sessionId);
+    mDrmSessionManager->addSession(kTestPid2, drm, sessionId);
+
+    mDrmSessionManager->removeDrm(mTestDrm2);
+
+    const PidSessionInfosMap& map = sessionMap();
+    const SessionInfos& infos2 = map.valueFor(kTestPid2);
+    EXPECT_EQ(1u, infos2.size());
+    // mTestDrm2 has been removed.
+    ExpectEqSessionInfo(infos2[0], drm, sessionId, 3);
+}
+
+TEST_F(DrmSessionManagerTest, reclaimSession) {
+    EXPECT_FALSE(mDrmSessionManager->reclaimSession(kTestPid1));
+    addSession();
+
+    // the calling pid's priority is too low
+    EXPECT_FALSE(mDrmSessionManager->reclaimSession(50));
+
+    EXPECT_TRUE(mDrmSessionManager->reclaimSession(10));
+    EXPECT_EQ(1u, mTestDrm1->reclaimedSessions().size());
+    EXPECT_TRUE(isEqualSessionId(mSessionId1, mTestDrm1->reclaimedSessions()[0]));
+
+    mDrmSessionManager->removeSession(mSessionId1);
+
+    // add a session from a higher priority process.
+    sp<FakeDrm> drm = new FakeDrm;
+    const uint8_t ids[] = {1, 3, 5};
+    Vector<uint8_t> sessionId;
+    GetSessionId(ids, ARRAY_SIZE(ids), &sessionId);
+    mDrmSessionManager->addSession(15, drm, sessionId);
+
+    EXPECT_TRUE(mDrmSessionManager->reclaimSession(18));
+    EXPECT_EQ(1u, mTestDrm2->reclaimedSessions().size());
+    // mSessionId2 is reclaimed.
+    EXPECT_TRUE(isEqualSessionId(mSessionId2, mTestDrm2->reclaimedSessions()[0]));
+}
+
+TEST_F(DrmSessionManagerTest, getLowestPriority) {
+    testGetLowestPriority();
+}
+
+TEST_F(DrmSessionManagerTest, getLeastUsedSession_l) {
+    testGetLeastUsedSession();
+}
+
+} // namespace android
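To make the reclaim ordering concrete: FakeProcessInfo reports priority == pid, and a lower value means higher priority. With sessions owned by pids 30 and 20, a caller at pid 10 outranks both, so reclaimSession(10) evicts the least-used session of pid 30 (the lowest-priority owner), while a caller at pid 50 outranks neither and reclaims nothing.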
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
new file mode 100644
index 0000000..a9a2d3c
--- /dev/null
+++ b/services/mediaextractor/Android.mk
@@ -0,0 +1,25 @@
+LOCAL_PATH := $(call my-dir)
+
+# service library
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES := MediaExtractorService.cpp
+LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
+LOCAL_MODULE:= libmediaextractorservice
+LOCAL_32_BIT_ONLY := true
+include $(BUILD_SHARED_LIBRARY)
+
+
+# service executable
+include $(CLEAR_VARS)
+LOCAL_REQUIRED_MODULES_arm := mediaextractor-seccomp.policy
+LOCAL_REQUIRED_MODULES_x86 := mediaextractor-seccomp.policy
+LOCAL_SRC_FILES := main_extractorservice.cpp minijail/minijail.cpp
+LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils liblog libicuuc libminijail
+LOCAL_STATIC_LIBRARIES := libicuandroid_utils
+LOCAL_MODULE:= mediaextractor
+LOCAL_32_BIT_ONLY := true
+LOCAL_INIT_RC := mediaextractor.rc
+LOCAL_C_INCLUDES := frameworks/av/media/libmedia
+include $(BUILD_EXECUTABLE)
+
+include $(call all-makefiles-under, $(LOCAL_PATH))
diff --git a/services/mediaextractor/MODULE_LICENSE_APACHE2 b/services/mediaextractor/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/services/mediaextractor/MODULE_LICENSE_APACHE2
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
new file mode 100644
index 0000000..4a80166
--- /dev/null
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaExtractorService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <utils/Vector.h>
+
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaExtractor.h>
+#include "MediaExtractorService.h"
+
+namespace android {
+
+sp<IMediaExtractor> MediaExtractorService::makeExtractor(
+        const sp<IDataSource> &remoteSource, const char *mime) {
+    ALOGV("@@@ MediaExtractorService::makeExtractor for %s", mime);
+
+    sp<DataSource> localSource = DataSource::CreateFromIDataSource(remoteSource);
+
+    sp<IMediaExtractor> ret = MediaExtractor::CreateFromService(localSource, mime);
+
+    ALOGV("extractor service created %p (%s)",
+            ret.get(),
+            ret == NULL ? "" : ret->name());
+
+    if (ret != NULL) {
+        registerMediaExtractor(ret, localSource, mime);
+    }
+
+    return ret;
+}
+
+status_t MediaExtractorService::dump(int fd, const Vector<String16>& args) {
+    return dumpExtractors(fd, args);
+}
+
+status_t MediaExtractorService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+        uint32_t flags)
+{
+    return BnMediaExtractorService::onTransact(code, data, reply, flags);
+}
+
+}   // namespace android
diff --git a/services/mediaextractor/MediaExtractorService.h b/services/mediaextractor/MediaExtractorService.h
new file mode 100644
index 0000000..078af0c
--- /dev/null
+++ b/services/mediaextractor/MediaExtractorService.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_EXTRACTOR_SERVICE_H
+#define ANDROID_MEDIA_EXTRACTOR_SERVICE_H
+
+#include <binder/BinderService.h>
+#include <media/IMediaExtractorService.h>
+#include <media/IMediaExtractor.h>
+
+namespace android {
+
+class MediaExtractorService : public BinderService<MediaExtractorService>, public BnMediaExtractorService
+{
+    friend class BinderService<MediaExtractorService>;    // for MediaExtractorService()
+public:
+    MediaExtractorService() : BnMediaExtractorService() { }
+    virtual ~MediaExtractorService() { }
+    virtual void onFirstRef() { }
+
+    static const char*  getServiceName() { return "media.extractor"; }
+
+    virtual sp<IMediaExtractor> makeExtractor(const sp<IDataSource> &source, const char *mime);
+    virtual status_t    dump(int fd, const Vector<String16>& args);
+
+    virtual status_t    onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+                                uint32_t flags);
+
+private:
+    Mutex               mLock;
+};
+
+}   // namespace android
+
+#endif  // ANDROID_MEDIA_EXTRACTOR_SERVICE_H
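A client-side sketch for the extractor service (assumes the IMediaExtractorService interface from libmedia):

    sp<IMediaExtractor> makeRemoteExtractor(const sp<IDataSource>& source) {
        sp<IMediaExtractorService> service = interface_cast<IMediaExtractorService>(
                defaultServiceManager()->getService(String16("media.extractor")));
        if (service == NULL) {
            return NULL;
        }
        // Passing a NULL mime lets the extractor sniff the container format.
        return service->makeExtractor(source, NULL);
    }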
diff --git a/services/mediaextractor/NOTICE b/services/mediaextractor/NOTICE
new file mode 100644
index 0000000..34bdaf1
--- /dev/null
+++ b/services/mediaextractor/NOTICE
@@ -0,0 +1,190 @@
+
+   Copyright (c) 2005-2015, The Android Open Source Project
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
diff --git a/services/mediaextractor/main_extractorservice.cpp b/services/mediaextractor/main_extractorservice.cpp
new file mode 100644
index 0000000..245489e
--- /dev/null
+++ b/services/mediaextractor/main_extractorservice.cpp
@@ -0,0 +1,58 @@
+/*
+**
+** Copyright 2008, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "mediaextractor"
+//#define LOG_NDEBUG 0
+
+#include <fcntl.h>
+#include <signal.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <binder/IPCThreadState.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+
+// from LOCAL_C_INCLUDES
+#include "IcuUtils.h"
+#include "MediaExtractorService.h"
+#include "MediaUtils.h"
+#include "minijail/minijail.h"
+
+using namespace android;
+
+int main(int argc __unused, char** argv)
+{
+    limitProcessMemory(
+        "ro.media.maxmem", /* property that defines limit */
+        SIZE_MAX, /* upper limit in bytes */
+        20 /* upper limit as percentage of physical RAM */);
+
+    signal(SIGPIPE, SIG_IGN);
+    MiniJail();
+
+    InitializeIcuOrDie();
+
+    strcpy(argv[0], "media.extractor");
+    sp<ProcessState> proc(ProcessState::self());
+    sp<IServiceManager> sm = defaultServiceManager();
+    MediaExtractorService::instantiate();
+    ProcessState::self()->startThreadPool();
+    IPCThreadState::self()->joinThreadPool();
+}
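
The strcpy() into argv[0] above renames the process as reported by ps. A small sketch, assuming one also wanted to set the 16-byte kernel comm name (not done in this change; setProcessName is hypothetical):

    #include <sys/prctl.h>

    // Hypothetical: also set the kernel thread name; truncated to 15 chars + NUL.
    static void setProcessName(const char *name) {
        prctl(PR_SET_NAME, name, 0, 0, 0);
    }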
diff --git a/services/mediaextractor/mediaextractor.rc b/services/mediaextractor/mediaextractor.rc
new file mode 100644
index 0000000..5fc2941
--- /dev/null
+++ b/services/mediaextractor/mediaextractor.rc
@@ -0,0 +1,6 @@
+service mediaextractor /system/bin/mediaextractor
+    class main
+    user mediaex
+    group drmrpc mediadrm
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediaextractor/minijail/Android.mk b/services/mediaextractor/minijail/Android.mk
new file mode 100644
index 0000000..79c5505
--- /dev/null
+++ b/services/mediaextractor/minijail/Android.mk
@@ -0,0 +1,28 @@
+LOCAL_PATH := $(call my-dir)
+
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86 x86_64))
+include $(CLEAR_VARS)
+LOCAL_MODULE := mediaextractor-seccomp.policy
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
+
+# mediaextractor runs in 32-bit compatibility mode. For 64-bit architectures,
+# use the 32-bit policy.
+ifdef TARGET_2ND_ARCH
+    LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_2ND_ARCH).policy
+else
+    LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_ARCH).policy
+endif
+
+# allow device-specific additions to the syscall whitelist
+ifneq (,$(wildcard $(BOARD_SECCOMP_POLICY)/mediaextractor-seccomp.policy))
+    LOCAL_SRC_FILES += $(BOARD_SECCOMP_POLICY)/mediaextractor-seccomp.policy
+endif
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+$(LOCAL_BUILT_MODULE): $(LOCAL_SRC_FILES)
+	@mkdir -p $(dir $@)
+	$(hide) cat > $@ $^
+
+endif
diff --git a/services/mediaextractor/minijail/minijail.cpp b/services/mediaextractor/minijail/minijail.cpp
new file mode 100644
index 0000000..421a1e0
--- /dev/null
+++ b/services/mediaextractor/minijail/minijail.cpp
@@ -0,0 +1,50 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <cutils/log.h>
+#include <libminijail.h>
+
+#include "minijail.h"
+
+namespace android {
+
+/* Must match location in Android.mk */
+static const char kSeccompFilePath[] = "/system/etc/seccomp_policy/mediaextractor-seccomp.policy";
+
+int MiniJail()
+{
+    /* no seccomp policy for this architecture */
+    if (access(kSeccompFilePath, R_OK) == -1) {
+        ALOGW("No seccomp filter defined for this architecture.");
+        return 0;
+    }
+
+    struct minijail *jail = minijail_new();
+    if (jail == NULL) {
+        ALOGW("Failed to create minijail.");
+        return -1;
+    }
+
+    minijail_no_new_privs(jail);
+    minijail_log_seccomp_filter_failures(jail);
+    minijail_use_seccomp_filter(jail);
+    minijail_parse_seccomp_filters(jail, kSeccompFilePath);
+    minijail_enter(jail);
+    minijail_destroy(jail);
+    return 0;
+}
+}
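
A sketch of how a caller could confirm the sandbox took effect, assuming a kernel with seccomp-BPF support and a policy that permits prctl (as the ones below do); PR_GET_SECCOMP reports 2 once filter mode is active (logSeccompMode is hypothetical):

    #include <sys/prctl.h>
    #include <cutils/log.h>

    // Hypothetical check, callable right after MiniJail():
    static void logSeccompMode() {
        // 0 = disabled, 1 = strict, 2 = filter (seccomp-BPF)
        ALOGI("seccomp mode: %d", prctl(PR_GET_SECCOMP, 0, 0, 0, 0));
    }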
diff --git a/services/mediaextractor/minijail/minijail.h b/services/mediaextractor/minijail/minijail.h
new file mode 100644
index 0000000..6ea4487
--- /dev/null
+++ b/services/mediaextractor/minijail/minijail.h
@@ -0,0 +1,20 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+namespace android {
+int MiniJail();
+}
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
new file mode 100644
index 0000000..165694c
--- /dev/null
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
@@ -0,0 +1,47 @@
+# Organized by system call frequency - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+mmap2: 1
+close: 1
+munmap: 1
+dup: 1
+mprotect: 1
+getuid32: 1
+setpriority: 1
+sigaltstack: 1
+openat: 1
+clone: 1
+read: 1
+clock_gettime: 1
+lseek: 1
+writev: 1
+fstatat64: 1
+fstat64: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+faccessat: 1
+madvise: 1
+brk: 1
+sched_setscheduler: 1
+gettid: 1
+rt_sigprocmask: 1
+sched_yield: 1
+ugetrlimit: 1
+geteuid32: 1
+getgid32: 1
+getegid32: 1
+getgroups32: 1
+
+# for attaching to debuggerd on process crash
+sigaction: 1
+tgkill: 1
+socket: 1
+connect: 1
+fcntl64: 1
+rt_tgsigqueueinfo: 1
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
new file mode 100644
index 0000000..67976ff
--- /dev/null
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
@@ -0,0 +1,45 @@
+# Organized by system call frequency - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+close: 1
+dup: 1
+munmap: 1
+mmap2: 1
+madvise: 1
+openat: 1
+clock_gettime: 1
+writev: 1
+brk: 1
+mprotect: 1
+read: 1
+lseek: 1
+getuid32: 1
+clone: 1
+setpriority: 1
+sigaltstack: 1
+fstatat64: 1
+fstat64: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+faccessat: 1
+sched_setscheduler: 1
+ugetrlimit: 1
+getrlimit: 1
+geteuid32: 1
+getgid32: 1
+getegid32: 1
+getgroups32: 1
+
+# for attaching to debuggerd on process crash
+socketcall: 1
+sigaction: 1
+tgkill: 1
+rt_sigprocmask: 1
+fcntl64: 1
+rt_tgsigqueueinfo: 1
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
index 03438bf..88f98cf 100644
--- a/services/medialog/Android.mk
+++ b/services/medialog/Android.mk
@@ -6,10 +6,12 @@
 
 LOCAL_SHARED_LIBRARIES := libmedia libbinder libutils liblog libnbaio
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
 LOCAL_MODULE:= libmedialogservice
 
-LOCAL_32_BIT_ONLY := true
-
 LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
 
+LOCAL_CFLAGS := -Werror -Wall
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index 41dab1f..f85aa13 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -26,9 +26,11 @@
 
 namespace android {
 
+static const char kDeadlockedString[] = "MediaLogService may be deadlocked\n";
+
 void MediaLogService::registerWriter(const sp<IMemory>& shared, size_t size, const char *name)
 {
-    if (IPCThreadState::self()->getCallingUid() != AID_MEDIA || shared == 0 ||
+    if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0 ||
             size < kMinSize || size > kMaxSize || name == NULL ||
             shared->size() < NBLog::Timeline::sharedSize(size)) {
         return;
@@ -41,7 +43,7 @@
 
 void MediaLogService::unregisterWriter(const sp<IMemory>& shared)
 {
-    if (IPCThreadState::self()->getCallingUid() != AID_MEDIA || shared == 0) {
+    if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0) {
         return;
     }
     Mutex::Autolock _l(mLock);
@@ -54,11 +56,24 @@
     }
 }
 
+bool MediaLogService::dumpTryLock(Mutex& mutex)
+{
+    bool locked = false;
+    for (int i = 0; i < kDumpLockRetries; ++i) {
+        if (mutex.tryLock() == NO_ERROR) {
+            locked = true;
+            break;
+        }
+        usleep(kDumpLockSleepUs);
+    }
+    return locked;
+}
+
 status_t MediaLogService::dump(int fd, const Vector<String16>& args __unused)
 {
     // FIXME merge with similar but not identical code at services/audioflinger/ServiceUtilities.cpp
     static const String16 sDump("android.permission.DUMP");
-    if (!(IPCThreadState::self()->getCallingUid() == AID_MEDIA ||
+    if (!(IPCThreadState::self()->getCallingUid() == AID_AUDIOSERVER ||
             PermissionCache::checkCallingPermission(sDump))) {
         dprintf(fd, "Permission Denial: can't dump media.log from pid=%d, uid=%d\n",
                 IPCThreadState::self()->getCallingPid(),
@@ -68,9 +83,22 @@
 
     Vector<NamedReader> namedReaders;
     {
-        Mutex::Autolock _l(mLock);
+        bool locked = dumpTryLock(mLock);
+
+        // failed to lock - MediaLogService is probably deadlocked
+        if (!locked) {
+            String8 result(kDeadlockedString);
+            if (fd >= 0) {
+                write(fd, result.string(), result.size());
+            } else {
+                ALOGW("%s:", result.string());
+            }
+            return NO_ERROR;
+        }
         namedReaders = mNamedReaders;
+        mLock.unlock();
     }
+
     for (size_t i = 0; i < namedReaders.size(); i++) {
         const NamedReader& namedReader = namedReaders[i];
         if (fd >= 0) {
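
dumpTryLock() above bounds the wait at kDumpLockRetries * kDumpLockSleepUs (50 * 20 ms, about one second), so a deadlocked service still emits the warning string instead of hanging dumpsys. The same pattern expressed with the standard library, as a sketch assuming a std::timed_mutex-based service:

    #include <chrono>
    #include <mutex>

    // Bounded dump lock: ~1 second total, mirroring the retry loop above.
    static bool dumpTryLock(std::timed_mutex &mutex) {
        return mutex.try_lock_for(std::chrono::seconds(1));
    }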
diff --git a/services/medialog/MediaLogService.h b/services/medialog/MediaLogService.h
index 2d89a41..c9bf2eb 100644
--- a/services/medialog/MediaLogService.h
+++ b/services/medialog/MediaLogService.h
@@ -43,6 +43,12 @@
                                 uint32_t flags);
 
 private:
+
+    // Internal dump
+    static const int kDumpLockRetries = 50;
+    static const int kDumpLockSleepUs = 20000;
+    static bool dumpTryLock(Mutex& mutex);
+
     Mutex               mLock;
     class NamedReader {
     public:
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 6781a36..3ed0189 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -19,6 +19,7 @@
 #define LOG_TAG "ResourceManagerService"
 #include <utils/Log.h>
 
+#include <binder/IMediaResourceMonitor.h>
 #include <binder/IServiceManager.h>
 #include <dirent.h>
 #include <media/stagefright/ProcessInfo.h>
@@ -42,7 +43,7 @@
     return itemsStr;
 }
 
-static bool hasResourceType(String8 type, Vector<MediaResource> resources) {
+static bool hasResourceType(MediaResource::Type type, Vector<MediaResource> resources) {
     for (size_t i = 0; i < resources.size(); ++i) {
         if (resources[i].mType == type) {
             return true;
@@ -51,7 +52,7 @@
     return false;
 }
 
-static bool hasResourceType(String8 type, ResourceInfos infos) {
+static bool hasResourceType(MediaResource::Type type, ResourceInfos infos) {
     for (size_t i = 0; i < infos.size(); ++i) {
         if (hasResourceType(type, infos[i].resources)) {
             return true;
@@ -89,6 +90,21 @@
     return infos.editItemAt(infos.size() - 1);
 }
 
+static void notifyResourceGranted(int pid, const Vector<MediaResource> &resources) {
+    static const char* const kServiceName = "media_resource_monitor";
+    sp<IBinder> binder = defaultServiceManager()->checkService(String16(kServiceName));
+    if (binder != NULL) {
+        sp<IMediaResourceMonitor> service = interface_cast<IMediaResourceMonitor>(binder);
+        for (size_t i = 0; i < resources.size(); ++i) {
+            if (resources[i].mSubType == MediaResource::kAudioCodec) {
+                service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_AUDIO_CODEC);
+            } else if (resources[i].mSubType == MediaResource::kVideoCodec) {
+                service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_VIDEO_CODEC);
+            }
+        }
+    }
+}
+
 status_t ResourceManagerService::dump(int fd, const Vector<String16>& /* args */) {
     String8 result;
 
@@ -154,10 +170,7 @@
 }
 
 ResourceManagerService::ResourceManagerService()
-    : mProcessInfo(new ProcessInfo()),
-      mServiceLog(new ServiceLog()),
-      mSupportsMultipleSecureCodecs(true),
-      mSupportsSecureWithNonSecureCodec(true) {}
+    : ResourceManagerService(new ProcessInfo()) {}
 
 ResourceManagerService::ResourceManagerService(sp<ProcessInfoInterface> processInfo)
     : mProcessInfo(processInfo),
@@ -193,10 +206,15 @@
     mServiceLog->add(log);
 
     Mutex::Autolock lock(mLock);
+    if (!mProcessInfo->isValidPid(pid)) {
+        ALOGE("Rejected addResource call with invalid pid.");
+        return;
+    }
     ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
     ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos);
     // TODO: do the merge instead of append.
     info.resources.appendVector(resources);
+    notifyResourceGranted(pid, resources);
 }
 
 void ResourceManagerService::removeResource(int pid, int64_t clientId) {
@@ -206,6 +224,10 @@
     mServiceLog->add(log);
 
     Mutex::Autolock lock(mLock);
+    if (!mProcessInfo->isValidPid(pid)) {
+        ALOGE("Rejected removeResource call with invalid pid.");
+        return;
+    }
     ssize_t index = mMap.indexOfKey(pid);
     if (index < 0) {
         ALOGV("removeResource: didn't find pid %d for clientId %lld", pid, (long long) clientId);
@@ -245,16 +267,20 @@
     Vector<sp<IResourceManagerClient>> clients;
     {
         Mutex::Autolock lock(mLock);
+        if (!mProcessInfo->isValidPid(callingPid)) {
+            ALOGE("Rejected reclaimResource call with invalid callingPid.");
+            return false;
+        }
         const MediaResource *secureCodec = NULL;
         const MediaResource *nonSecureCodec = NULL;
         const MediaResource *graphicMemory = NULL;
         for (size_t i = 0; i < resources.size(); ++i) {
-            String8 type = resources[i].mType;
-            if (resources[i].mType == kResourceSecureCodec) {
+            MediaResource::Type type = resources[i].mType;
+            if (resources[i].mType == MediaResource::kSecureCodec) {
                 secureCodec = &resources[i];
-            } else if (type == kResourceNonSecureCodec) {
+            } else if (type == MediaResource::kNonSecureCodec) {
                 nonSecureCodec = &resources[i];
-            } else if (type == kResourceGraphicMemory) {
+            } else if (type == MediaResource::kGraphicMemory) {
                 graphicMemory = &resources[i];
             }
         }
@@ -262,19 +288,19 @@
         // first pass to handle secure/non-secure codec conflict
         if (secureCodec != NULL) {
             if (!mSupportsMultipleSecureCodecs) {
-                if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+                if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) {
                     return false;
                 }
             }
             if (!mSupportsSecureWithNonSecureCodec) {
-                if (!getAllClients_l(callingPid, String8(kResourceNonSecureCodec), &clients)) {
+                if (!getAllClients_l(callingPid, MediaResource::kNonSecureCodec, &clients)) {
                     return false;
                 }
             }
         }
         if (nonSecureCodec != NULL) {
             if (!mSupportsSecureWithNonSecureCodec) {
-                if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+                if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) {
                     return false;
                 }
             }
@@ -294,11 +320,11 @@
         if (clients.size() == 0) {
             // if we are here, run the fourth pass to free one codec with the different type.
             if (secureCodec != NULL) {
-                MediaResource temp(String8(kResourceNonSecureCodec), 1);
+                MediaResource temp(MediaResource::kNonSecureCodec, 1);
                 getClientForResource_l(callingPid, &temp, &clients);
             }
             if (nonSecureCodec != NULL) {
-                MediaResource temp(String8(kResourceSecureCodec), 1);
+                MediaResource temp(MediaResource::kSecureCodec, 1);
                 getClientForResource_l(callingPid, &temp, &clients);
             }
         }
@@ -348,7 +374,7 @@
 }
 
 bool ResourceManagerService::getAllClients_l(
-        int callingPid, const String8 &type, Vector<sp<IResourceManagerClient>> *clients) {
+        int callingPid, MediaResource::Type type, Vector<sp<IResourceManagerClient>> *clients) {
     Vector<sp<IResourceManagerClient>> temp;
     for (size_t i = 0; i < mMap.size(); ++i) {
         ResourceInfos &infos = mMap.editValueAt(i);
@@ -358,7 +384,7 @@
                     // some higher/equal priority process owns the resource,
                     // this request can't be fulfilled.
                     ALOGE("getAllClients_l: can't reclaim resource %s from pid %d",
-                            type.string(), mMap.keyAt(i));
+                            asString(type), mMap.keyAt(i));
                     return false;
                 }
                 temp.push_back(infos[j].client);
@@ -366,7 +392,7 @@
         }
     }
     if (temp.size() == 0) {
-        ALOGV("getAllClients_l: didn't find any resource %s", type.string());
+        ALOGV("getAllClients_l: didn't find any resource %s", asString(type));
         return true;
     }
     clients->appendVector(temp);
@@ -374,7 +400,7 @@
 }
 
 bool ResourceManagerService::getLowestPriorityBiggestClient_l(
-        int callingPid, const String8 &type, sp<IResourceManagerClient> *client) {
+        int callingPid, MediaResource::Type type, sp<IResourceManagerClient> *client) {
     int lowestPriorityPid;
     int lowestPriority;
     int callingPriority;
@@ -399,7 +425,7 @@
 }
 
 bool ResourceManagerService::getLowestPriorityPid_l(
-        const String8 &type, int *lowestPriorityPid, int *lowestPriority) {
+        MediaResource::Type type, int *lowestPriorityPid, int *lowestPriority) {
     int pid = -1;
     int priority = -1;
     for (size_t i = 0; i < mMap.size(); ++i) {
@@ -446,7 +472,7 @@
 }
 
 bool ResourceManagerService::getBiggestClient_l(
-        int pid, const String8 &type, sp<IResourceManagerClient> *client) {
+        int pid, MediaResource::Type type, sp<IResourceManagerClient> *client) {
     ssize_t index = mMap.indexOfKey(pid);
     if (index < 0) {
         ALOGE("getBiggestClient_l: can't find resource info for pid %d", pid);
@@ -469,7 +495,7 @@
     }
 
     if (clientTemp == NULL) {
-        ALOGE("getBiggestClient_l: can't find resource type %s for pid %d", type.string(), pid);
+        ALOGE("getBiggestClient_l: can't find resource type %s for pid %d", asString(type), pid);
         return false;
     }
 
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 4769373..8f6fe9a 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -79,22 +79,22 @@
     // Gets the list of all the clients who own the specified resource type.
     // Returns false if any client belongs to a process with higher priority than the
     // calling process. The clients will remain unchanged if returns false.
-    bool getAllClients_l(int callingPid, const String8 &type,
+    bool getAllClients_l(int callingPid, MediaResource::Type type,
             Vector<sp<IResourceManagerClient>> *clients);
 
     // Gets the client who owns specified resource type from lowest possible priority process.
     // Returns false if the calling process priority is not higher than the lowest process
     // priority. The client will remain unchanged if returns false.
-    bool getLowestPriorityBiggestClient_l(int callingPid, const String8 &type,
+    bool getLowestPriorityBiggestClient_l(int callingPid, MediaResource::Type type,
             sp<IResourceManagerClient> *client);
 
     // Gets lowest priority process that has the specified resource type.
     // Returns false if failed. The output parameters will remain unchanged if failed.
-    bool getLowestPriorityPid_l(const String8 &type, int *pid, int *priority);
+    bool getLowestPriorityPid_l(MediaResource::Type type, int *pid, int *priority);
 
     // Gets the client who owns biggest piece of specified resource type from pid.
     // Returns false if failed. The client will remain unchanged if failed.
-    bool getBiggestClient_l(int pid, const String8 &type, sp<IResourceManagerClient> *client);
+    bool getBiggestClient_l(int pid, MediaResource::Type type, sp<IResourceManagerClient> *client);
 
     bool isCallingPriorityHigher_l(int callingPid, int pid);
 
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index df49ddc..62b7711 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -44,6 +44,10 @@
         return true;
     }
 
+    virtual bool isValidPid(int /* pid */) {
+        return true;
+    }
+
 private:
     DISALLOW_EVIL_CONSTRUCTORS(TestProcessInfo);
 };
@@ -148,24 +152,24 @@
     void addResource() {
         // kTestPid1 mTestClient1
         Vector<MediaResource> resources1;
-        resources1.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+        resources1.push_back(MediaResource(MediaResource::kSecureCodec, 1));
         mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources1);
-        resources1.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+        resources1.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
         Vector<MediaResource> resources11;
-        resources11.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+        resources11.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
         mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources11);
 
         // kTestPid2 mTestClient2
         Vector<MediaResource> resources2;
-        resources2.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
-        resources2.push_back(MediaResource(String8(kResourceGraphicMemory), 300));
+        resources2.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
+        resources2.push_back(MediaResource(MediaResource::kGraphicMemory, 300));
         mService->addResource(kTestPid2, getId(mTestClient2), mTestClient2, resources2);
 
         // kTestPid2 mTestClient3
         Vector<MediaResource> resources3;
         mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
-        resources3.push_back(MediaResource(String8(kResourceSecureCodec), 1));
-        resources3.push_back(MediaResource(String8(kResourceGraphicMemory), 100));
+        resources3.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+        resources3.push_back(MediaResource(MediaResource::kGraphicMemory, 100));
         mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
 
         const PidResourceInfosMap &map = mService->mMap;
@@ -233,14 +237,12 @@
     void testGetAllClients() {
         addResource();
 
-        String8 type = String8(kResourceSecureCodec);
-        String8 unknowType = String8("unknowType");
+        MediaResource::Type type = MediaResource::kSecureCodec;
         Vector<sp<IResourceManagerClient> > clients;
         EXPECT_FALSE(mService->getAllClients_l(kLowPriorityPid, type, &clients));
         // some higher priority process (e.g. kTestPid2) owns the resource, so getAllClients_l
         // will fail.
         EXPECT_FALSE(mService->getAllClients_l(kMidPriorityPid, type, &clients));
-        EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, unknowType, &clients));
         EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, type, &clients));
 
         EXPECT_EQ(2u, clients.size());
@@ -250,8 +252,8 @@
 
     void testReclaimResourceSecure() {
         Vector<MediaResource> resources;
-        resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
-        resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+        resources.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+        resources.push_back(MediaResource(MediaResource::kGraphicMemory, 150));
 
         // ### secure codec can't coexist and secure codec can coexist with non-secure codec ###
         {
@@ -352,7 +354,7 @@
             mService->mSupportsSecureWithNonSecureCodec = true;
 
             Vector<MediaResource> resources;
-            resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+            resources.push_back(MediaResource(MediaResource::kSecureCodec, 1));
 
             EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
             // secure codec from lowest process got reclaimed
@@ -370,8 +372,8 @@
 
     void testReclaimResourceNonSecure() {
         Vector<MediaResource> resources;
-        resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
-        resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+        resources.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
+        resources.push_back(MediaResource(MediaResource::kGraphicMemory, 150));
 
         // ### secure codec can't coexist with non-secure codec ###
         {
@@ -425,7 +427,7 @@
             mService->mSupportsSecureWithNonSecureCodec = true;
 
             Vector<MediaResource> resources;
-            resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
+            resources.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
 
             EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
             // one non secure codec from lowest process got reclaimed
@@ -441,7 +443,7 @@
     }
 
     void testGetLowestPriorityBiggestClient() {
-        String8 type = String8(kResourceGraphicMemory);
+        MediaResource::Type type = MediaResource::kGraphicMemory;
         sp<IResourceManagerClient> client;
         EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
 
@@ -450,8 +452,8 @@
         EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kLowPriorityPid, type, &client));
         EXPECT_TRUE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
 
-        // kTestPid1 is the lowest priority process with kResourceGraphicMemory.
-        // mTestClient1 has the largest kResourceGraphicMemory within kTestPid1.
+        // kTestPid1 is the lowest priority process with MediaResource::kGraphicMemory.
+        // mTestClient1 has the largest MediaResource::kGraphicMemory within kTestPid1.
         EXPECT_EQ(mTestClient1, client);
     }
 
@@ -460,7 +462,7 @@
         int priority;
         TestProcessInfo processInfo;
 
-        String8 type = String8(kResourceGraphicMemory);
+        MediaResource::Type type = MediaResource::kGraphicMemory;
         EXPECT_FALSE(mService->getLowestPriorityPid_l(type, &pid, &priority));
 
         addResource();
@@ -471,7 +473,7 @@
         processInfo.getPriority(kTestPid1, &priority1);
         EXPECT_EQ(priority1, priority);
 
-        type = String8(kResourceNonSecureCodec);
+        type = MediaResource::kNonSecureCodec;
         EXPECT_TRUE(mService->getLowestPriorityPid_l(type, &pid, &priority));
         EXPECT_EQ(kTestPid2, pid);
         int priority2;
@@ -480,7 +482,7 @@
     }
 
     void testGetBiggestClient() {
-        String8 type = String8(kResourceGraphicMemory);
+        MediaResource::Type type = MediaResource::kGraphicMemory;
         sp<IResourceManagerClient> client;
         EXPECT_FALSE(mService->getBiggestClient_l(kTestPid2, type, &client));
 
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
index 9ee5666..f5d74d3 100644
--- a/services/radio/Android.mk
+++ b/services/radio/Android.mk
@@ -31,6 +31,10 @@
     libradio \
     libradio_metadata
 
+LOCAL_CFLAGS += -Wall -Wextra -Werror
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
 LOCAL_MODULE:= libradioservice
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/radio/RadioRegions.h b/services/radio/RadioRegions.h
index 3335b8a..d40ee83 100644
--- a/services/radio/RadioRegions.h
+++ b/services/radio/RadioRegions.h
@@ -67,11 +67,14 @@
             1,
             {RADIO_BAND_SPACING_FM_ITU1},
             {
-            RADIO_DEEMPHASIS_50,
-            true,
-            RADIO_RDS_WORLD,
-            true,
-            true,
+                {
+                RADIO_DEEMPHASIS_50,
+                true,
+                RADIO_RDS_WORLD,
+                true,
+                true,
+                true,
+                }
             }
         }
     },
@@ -85,11 +88,14 @@
             1,
             {RADIO_BAND_SPACING_FM_ITU2},
             {
-            RADIO_DEEMPHASIS_75,
-            true,
-            RADIO_RDS_US,
-            true,
-            true,
+                {
+                RADIO_DEEMPHASIS_75,
+                true,
+                RADIO_RDS_US,
+                true,
+                true,
+                true,
+                }
             }
         }
     },
@@ -103,11 +109,14 @@
             1,
             {RADIO_BAND_SPACING_FM_JAPAN},
             {
-            RADIO_DEEMPHASIS_50,
-            true,
-            RADIO_RDS_WORLD,
-            true,
-            true,
+                {
+                RADIO_DEEMPHASIS_50,
+                true,
+                RADIO_RDS_WORLD,
+                true,
+                true,
+                true,
+                }
             }
         }
     },
@@ -121,11 +130,14 @@
             1,
             {RADIO_BAND_SPACING_FM_ITU1},
             {
-            RADIO_DEEMPHASIS_75,
-            true,
-            RADIO_RDS_WORLD,
-            true,
-            true,
+                {
+                RADIO_DEEMPHASIS_75,
+                true,
+                RADIO_RDS_WORLD,
+                true,
+                true,
+                true,
+                }
             }
         }
     },
@@ -139,11 +151,14 @@
             1,
             {RADIO_BAND_SPACING_FM_OIRT},
             {
-            RADIO_DEEMPHASIS_50,
-            true,
-            RADIO_RDS_WORLD,
-            true,
-            true,
+                {
+                RADIO_DEEMPHASIS_50,
+                true,
+                RADIO_RDS_WORLD,
+                true,
+                true,
+                true,
+                }
             }
         }
     },
@@ -157,11 +172,14 @@
             1,
             {RADIO_BAND_SPACING_FM_ITU2},
             {
-            RADIO_DEEMPHASIS_75,
-            true,
-            RADIO_RDS_US,
-            true,
-            true,
+                {
+                RADIO_DEEMPHASIS_75,
+                true,
+                RADIO_RDS_US,
+                true,
+                true,
+                true,
+                }
             }
         }
     },
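
The added braces initialize a nested aggregate: each band config ends in a union of FM and AM descriptors, so the FM member must be brace-initialized explicitly now that it carries a sixth flag (matching the RADIO_EVENT_EA case added below). A sketch of the assumed FM layout from the system/media/radio headers:

    // Assumed layout; the sixth "true" in the initializers above maps to ea.
    typedef struct radio_hal_fm_band_config {
        radio_deemphasis_t deemphasis;
        bool               stereo;
        radio_rds_t        rds;
        bool               ta;   // traffic announcements
        bool               af;   // alternate frequencies
        bool               ea;   // emergency announcements (assumed new field)
    } radio_hal_fm_band_config_t;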
diff --git a/services/radio/RadioService.cpp b/services/radio/RadioService.cpp
index cd0f5f3..5a3f750 100644
--- a/services/radio/RadioService.cpp
+++ b/services/radio/RadioService.cpp
@@ -349,6 +349,7 @@
         }
         break;
     case RADIO_EVENT_TA:
+    case RADIO_EVENT_EA:
     case RADIO_EVENT_ANTENNA:
     case RADIO_EVENT_CONTROL:
         event->on = halEvent->on;
@@ -734,7 +735,7 @@
         }
     } else {
         mConfig = *config;
-        status == INVALID_OPERATION;
+        status = INVALID_OPERATION;
     }
 
     return status;
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index ecc49ae..e8e18b8 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -38,6 +38,8 @@
 LOCAL_C_INCLUDES += \
     $(TOPDIR)frameworks/av/services/audioflinger
 
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
 LOCAL_MODULE:= libsoundtriggerservice
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 9de6fe2..bb2416a 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -61,16 +61,17 @@
     rc = hw_get_module_by_class(SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, &mod);
     if (rc != 0) {
         ALOGE("couldn't load sound trigger module %s.%s (%s)",
-              SOUND_TRIGGER_HARDWARE_MODULE_ID, "primary", strerror(-rc));
+              SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, strerror(-rc));
         return;
     }
     rc = sound_trigger_hw_device_open(mod, &dev);
     if (rc != 0) {
         ALOGE("couldn't open sound trigger hw device in %s.%s (%s)",
-              SOUND_TRIGGER_HARDWARE_MODULE_ID, "primary", strerror(-rc));
+              SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, strerror(-rc));
         return;
     }
-    if (dev->common.version != SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT) {
+    if (dev->common.version < SOUND_TRIGGER_DEVICE_API_VERSION_1_0 ||
+        dev->common.version > SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT) {
         ALOGE("wrong sound trigger hw device version %04x", dev->common.version);
         return;
     }
@@ -241,6 +242,13 @@
                     event->data_offset);
         event->data_offset = sizeof(struct sound_trigger_phrase_recognition_event);
         break;
+    case SOUND_MODEL_TYPE_GENERIC:
+        ALOGW_IF(event->data_size != 0 && event->data_offset !=
+                    sizeof(struct sound_trigger_generic_recognition_event),
+                    "prepareRecognitionEvent_l(): invalid data offset %u for generic event type",
+                    event->data_offset);
+        event->data_offset = sizeof(struct sound_trigger_generic_recognition_event);
+        break;
     case SOUND_MODEL_TYPE_UNKNOWN:
         ALOGW_IF(event->data_size != 0 && event->data_offset !=
                     sizeof(struct sound_trigger_recognition_event),
@@ -537,19 +545,15 @@
     AutoMutex lock(mLock);
 
     if (mModels.size() >= mDescriptor.properties.max_sound_models) {
-        if (mModels.size() == 0) {
-            return INVALID_OPERATION;
-        }
-        ALOGW("loadSoundModel() max number of models exceeded %d making room for a new one",
+        ALOGW("loadSoundModel(): Not loading, max number of models (%d) would be exceeded",
               mDescriptor.properties.max_sound_models);
-        unloadSoundModel_l(mModels.valueAt(0)->mHandle);
+        return INVALID_OPERATION;
     }
 
-    status_t status = mHwDevice->load_sound_model(mHwDevice,
-                                                  sound_model,
+    status_t status = mHwDevice->load_sound_model(mHwDevice, sound_model,
                                                   SoundTriggerHwService::soundModelCallback,
-                                                  this,
-                                                  handle);
+                                                  this, handle);
+
     if (status != NO_ERROR) {
         return status;
     }
@@ -781,31 +785,60 @@
             goto exit;
         }
 
+        const bool supports_stop_all =
+            (mHwDevice->common.version >= SOUND_TRIGGER_DEVICE_API_VERSION_1_1 &&
+             mHwDevice->stop_all_recognitions);
+
+        if (supports_stop_all) {
+            mHwDevice->stop_all_recognitions(mHwDevice);
+        }
+
         for (size_t i = 0; i < mModels.size(); i++) {
             sp<Model> model = mModels.valueAt(i);
             if (model->mState == Model::STATE_ACTIVE) {
-                mHwDevice->stop_recognition(mHwDevice, model->mHandle);
-                // keep model in ACTIVE state so that event is processed by onCallbackEvent()
-                struct sound_trigger_phrase_recognition_event phraseEvent;
-                memset(&phraseEvent, 0, sizeof(struct sound_trigger_phrase_recognition_event));
-                switch (model->mType) {
-                case SOUND_MODEL_TYPE_KEYPHRASE:
-                    phraseEvent.num_phrases = model->mConfig.num_phrases;
-                    for (size_t i = 0; i < phraseEvent.num_phrases; i++) {
-                        phraseEvent.phrase_extras[i] = model->mConfig.phrases[i];
-                    }
-                    break;
-                case SOUND_MODEL_TYPE_UNKNOWN:
-                default:
-                    break;
+                if (!supports_stop_all) {
+                    mHwDevice->stop_recognition(mHwDevice, model->mHandle);
                 }
-                phraseEvent.common.status = RECOGNITION_STATUS_ABORT;
-                phraseEvent.common.type = model->mType;
-                phraseEvent.common.model = model->mHandle;
-                phraseEvent.common.data_size = 0;
-                sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&phraseEvent.common);
-                if (eventMemory != 0) {
-                    events.add(eventMemory);
+                // keep model in ACTIVE state so that event is processed by onCallbackEvent()
+                if (model->mType == SOUND_MODEL_TYPE_KEYPHRASE) {
+                    struct sound_trigger_phrase_recognition_event event;
+                    memset(&event, 0, sizeof(struct sound_trigger_phrase_recognition_event));
+                    event.num_phrases = model->mConfig.num_phrases;
+                    for (size_t i = 0; i < event.num_phrases; i++) {
+                        event.phrase_extras[i] = model->mConfig.phrases[i];
+                    }
+                    event.common.status = RECOGNITION_STATUS_ABORT;
+                    event.common.type = model->mType;
+                    event.common.model = model->mHandle;
+                    event.common.data_size = 0;
+                    sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&event.common);
+                    if (eventMemory != 0) {
+                        events.add(eventMemory);
+                    }
+                } else if (model->mType == SOUND_MODEL_TYPE_GENERIC) {
+                    struct sound_trigger_generic_recognition_event event;
+                    memset(&event, 0, sizeof(struct sound_trigger_generic_recognition_event));
+                    event.common.status = RECOGNITION_STATUS_ABORT;
+                    event.common.type = model->mType;
+                    event.common.model = model->mHandle;
+                    event.common.data_size = 0;
+                    sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&event.common);
+                    if (eventMemory != 0) {
+                        events.add(eventMemory);
+                    }
+                } else if (model->mType == SOUND_MODEL_TYPE_UNKNOWN) {
+                    struct sound_trigger_phrase_recognition_event event;
+                    memset(&event, 0, sizeof(struct sound_trigger_phrase_recognition_event));
+                    event.common.status = RECOGNITION_STATUS_ABORT;
+                    event.common.type = model->mType;
+                    event.common.model = model->mHandle;
+                    event.common.data_size = 0;
+                    sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&event.common);
+                    if (eventMemory != 0) {
+                        events.add(eventMemory);
+                    }
+                } else {
+                    goto exit;
                 }
             }
         }
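
The three branches above fill the same four common fields before calling prepareRecognitionEvent_l(). A hedged refactoring sketch, not part of this change, that factors out the shared abort-event construction (names follow those used in the file):

    // Sketch: build an abort event for any concrete recognition-event struct.
    template <typename EventT>
    static sp<IMemory> makeAbortEvent(const sp<SoundTriggerHwService> &service,
                                      const sp<Model> &model) {
        EventT event;
        memset(&event, 0, sizeof(EventT));
        event.common.status = RECOGNITION_STATUS_ABORT;
        event.common.type = model->mType;
        event.common.model = model->mHandle;
        event.common.data_size = 0;
        return service->prepareRecognitionEvent_l(&event.common);
    }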
diff --git a/soundtrigger/Android.mk b/soundtrigger/Android.mk
index d91c4c2..c794cc1 100644
--- a/soundtrigger/Android.mk
+++ b/soundtrigger/Android.mk
@@ -35,4 +35,6 @@
 
 LOCAL_MODULE:= libsoundtrigger
 
+LOCAL_CFLAGS := -Werror -Wall
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/tools/resampler_tools/Android.mk b/tools/resampler_tools/Android.mk
index b58e4cd..bba5199 100644
--- a/tools/resampler_tools/Android.mk
+++ b/tools/resampler_tools/Android.mk
@@ -12,6 +12,6 @@
 
 LOCAL_MODULE := fir
 
+LOCAL_CFLAGS := -Werror -Wall
+
 include $(BUILD_HOST_EXECUTABLE)
-
-